Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c3
-rw-r--r--drivers/net/3c505.c15
-rw-r--r--drivers/net/3c507.c5
-rw-r--r--drivers/net/3c509.c11
-rw-r--r--drivers/net/3c515.c3
-rw-r--r--drivers/net/3c523.c14
-rw-r--r--drivers/net/3c527.c17
-rw-r--r--drivers/net/3c59x.c6
-rw-r--r--drivers/net/7990.c7
-rw-r--r--drivers/net/8139cp.c82
-rw-r--r--drivers/net/8139too.c197
-rw-r--r--drivers/net/82596.c17
-rw-r--r--drivers/net/Kconfig111
-rw-r--r--drivers/net/Makefile7
-rw-r--r--drivers/net/a2065.c7
-rw-r--r--drivers/net/acenic.c5
-rw-r--r--drivers/net/amd8111e.c22
-rw-r--r--drivers/net/amd8111e.h1
-rw-r--r--drivers/net/appletalk/cops.c1
-rw-r--r--drivers/net/appletalk/ipddp.c1
-rw-r--r--drivers/net/appletalk/ltpc.c3
-rw-r--r--drivers/net/arcnet/arc-rawmode.c1
-rw-r--r--drivers/net/arcnet/arc-rimi.c1
-rw-r--r--drivers/net/arcnet/capmode.c1
-rw-r--r--drivers/net/arcnet/com20020-isa.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/arcnet/com20020.c1
-rw-r--r--drivers/net/arcnet/com90io.c1
-rw-r--r--drivers/net/arcnet/com90xx.c1
-rw-r--r--drivers/net/arcnet/rfc1051.c1
-rw-r--r--drivers/net/arcnet/rfc1201.c1
-rw-r--r--drivers/net/ariadne.c5
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/arm/am79c961a.c12
-rw-r--r--drivers/net/arm/at91_ether.c10
-rw-r--r--drivers/net/arm/ep93xx_eth.c141
-rw-r--r--drivers/net/arm/ether3.c2
-rw-r--r--drivers/net/arm/etherh.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c14
-rw-r--r--drivers/net/arm/ks8695net.c50
-rw-r--r--drivers/net/arm/w90p910_ether.c9
-rw-r--r--drivers/net/at1700.c9
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c.h11
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c3
-rw-r--r--drivers/net/atl1c/atl1c_hw.c83
-rw-r--r--drivers/net/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/atl1c/atl1c_main.c126
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c1
-rw-r--r--drivers/net/atl1e/atl1e_hw.c23
-rw-r--r--drivers/net/atl1e/atl1e_main.c160
-rw-r--r--drivers/net/atl1e/atl1e_param.c35
-rw-r--r--drivers/net/atlx/atl1.c4
-rw-r--r--drivers/net/atlx/atl2.c12
-rw-r--r--drivers/net/atlx/atl2.h2
-rw-r--r--drivers/net/atlx/atlx.c2
-rw-r--r--drivers/net/atp.c10
-rw-r--r--drivers/net/au1000_eth.c448
-rw-r--r--drivers/net/au1000_eth.h9
-rw-r--r--drivers/net/ax88796.c3
-rw-r--r--drivers/net/b44.c95
-rw-r--r--drivers/net/bcm63xx_enet.c16
-rw-r--r--drivers/net/benet/Kconfig4
-rw-r--r--drivers/net/benet/be.h31
-rw-r--r--drivers/net/benet/be_cmds.c167
-rw-r--r--drivers/net/benet/be_cmds.h48
-rw-r--r--drivers/net/benet/be_ethtool.c144
-rw-r--r--drivers/net/benet/be_hw.h122
-rw-r--r--drivers/net/benet/be_main.c597
-rw-r--r--drivers/net/bfin_mac.c13
-rw-r--r--drivers/net/bmac.c14
-rw-r--r--drivers/net/bnx2.c468
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x.h53
-rw-r--r--drivers/net/bnx2x_fw_defs.h7
-rw-r--r--drivers/net/bnx2x_hsi.h10
-rw-r--r--drivers/net/bnx2x_init_ops.h13
-rw-r--r--drivers/net/bnx2x_link.c21
-rw-r--r--drivers/net/bnx2x_main.c294
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c91
-rw-r--r--drivers/net/bonding/bond_sysfs.c5
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c104
-rw-r--r--drivers/net/can/dev.c9
-rw-r--r--drivers/net/can/mcp251x.c429
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c3
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c473
-rw-r--r--drivers/net/can/sja1000/sja1000.c27
-rw-r--r--drivers/net/can/ti_hecc.c73
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c18
-rw-r--r--drivers/net/can/vcan.c13
-rw-r--r--drivers/net/cassini.c439
-rw-r--r--drivers/net/chelsio/common.h45
-rw-r--r--drivers/net/chelsio/cxgb2.c20
-rw-r--r--drivers/net/chelsio/espi.c4
-rw-r--r--drivers/net/chelsio/pm3393.c23
-rw-r--r--drivers/net/chelsio/sge.c15
-rw-r--r--drivers/net/chelsio/subr.c34
-rw-r--r--drivers/net/chelsio/vsc7326.c24
-rw-r--r--drivers/net/cnic.c214
-rw-r--r--drivers/net/cnic.h13
-rw-r--r--drivers/net/cnic_defs.h2
-rw-r--r--drivers/net/cnic_if.h6
-rw-r--r--drivers/net/cpmac.c28
-rw-r--r--drivers/net/cris/eth_v10.c9
-rw-r--r--drivers/net/cs89x0.c9
-rw-r--r--drivers/net/cxgb3/adapter.h5
-rw-r--r--drivers/net/cxgb3/ael1002.c2
-rw-r--r--drivers/net/cxgb3/common.h28
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c71
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h5
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/regs.h16
-rw-r--r--drivers/net/cxgb3/sge.c51
-rw-r--r--drivers/net/cxgb3/t3_hw.c8
-rw-r--r--drivers/net/cxgb3/xgmac.c18
-rw-r--r--drivers/net/cxgb4/Makefile7
-rw-r--r--drivers/net/cxgb4/cxgb4.h741
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3388
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h239
-rw-r--r--drivers/net/cxgb4/l2t.c624
-rw-r--r--drivers/net/cxgb4/l2t.h110
-rw-r--r--drivers/net/cxgb4/sge.c2431
-rw-r--r--drivers/net/cxgb4/t4_hw.c3131
-rw-r--r--drivers/net/cxgb4/t4_hw.h100
-rw-r--r--drivers/net/cxgb4/t4_msg.h664
-rw-r--r--drivers/net/cxgb4/t4_regs.h878
-rw-r--r--drivers/net/cxgb4/t4fw_api.h1580
-rw-r--r--drivers/net/davinci_emac.c147
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c6
-rw-r--r--drivers/net/defxx.c24
-rw-r--r--drivers/net/depca.c5
-rw-r--r--drivers/net/dl2k.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/e100.c30
-rw-r--r--drivers/net/e1000/e1000.h4
-rw-r--r--drivers/net/e1000/e1000_ethtool.c19
-rw-r--r--drivers/net/e1000/e1000_main.c117
-rw-r--r--drivers/net/e1000e/82571.c92
-rw-r--r--drivers/net/e1000e/defines.h4
-rw-r--r--drivers/net/e1000e/e1000.h27
-rw-r--r--drivers/net/e1000e/es2lan.c34
-rw-r--r--drivers/net/e1000e/ethtool.c3
-rw-r--r--drivers/net/e1000e/hw.h13
-rw-r--r--drivers/net/e1000e/ich8lan.c89
-rw-r--r--drivers/net/e1000e/lib.c284
-rw-r--r--drivers/net/e1000e/netdev.c216
-rw-r--r--drivers/net/e1000e/phy.c85
-rw-r--r--drivers/net/eepro.c22
-rw-r--r--drivers/net/eexpress.c23
-rw-r--r--drivers/net/ehea/ehea_main.c10
-rw-r--r--drivers/net/ehea/ehea_qmr.c1
-rw-r--r--drivers/net/enc28j60.c3
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c208
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c2
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/enic/vnic_rq.c1
-rw-r--r--drivers/net/enic/vnic_wq.c1
-rw-r--r--drivers/net/epic100.c10
-rw-r--r--drivers/net/eql.c1
-rw-r--r--drivers/net/eth16i.c3
-rw-r--r--drivers/net/ethoc.c15
-rw-r--r--drivers/net/ewrk3.c5
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c84
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/fec_mpc52xx_phy.c1
-rw-r--r--drivers/net/forcedeth.c11
-rw-r--r--drivers/net/fs_enet/Kconfig10
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c93
-rw-r--r--drivers/net/fs_enet/fs_enet.h49
-rw-r--r--drivers/net/fs_enet/mac-fcc.c11
-rw-r--r--drivers/net/fs_enet/mac-fec.c64
-rw-r--r--drivers/net/fs_enet/mac-scc.c14
-rw-r--r--drivers/net/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/fsl_pq_mdio.c50
-rw-r--r--drivers/net/gianfar.c63
-rw-r--r--drivers/net/gianfar.h6
-rw-r--r--drivers/net/gianfar_ethtool.c1
-rw-r--r--drivers/net/gianfar_sysfs.c1
-rw-r--r--drivers/net/greth.c1635
-rw-r--r--drivers/net/greth.h143
-rw-r--r--drivers/net/hamachi.c14
-rw-r--r--drivers/net/hamradio/6pack.c1
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/hamradio/dmascc.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c1
-rw-r--r--drivers/net/hamradio/mkiss.c1
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hp100.c14
-rw-r--r--drivers/net/hplance.c1
-rw-r--r--drivers/net/hydra.c1
-rw-r--r--drivers/net/ibm_newemac/core.c9
-rw-r--r--drivers/net/ibm_newemac/core.h1
-rw-r--r--drivers/net/ibm_newemac/mal.c1
-rw-r--r--drivers/net/ibm_newemac/rgmii.c1
-rw-r--r--drivers/net/ibm_newemac/zmii.c1
-rw-r--r--drivers/net/ibmlana.c6
-rw-r--r--drivers/net/ibmveth.c11
-rw-r--r--drivers/net/igb/e1000_82575.c71
-rw-r--r--drivers/net/igb/e1000_82575.h5
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h8
-rw-r--r--drivers/net/igb/e1000_mac.c76
-rw-r--r--drivers/net/igb/e1000_mac.h2
-rw-r--r--drivers/net/igb/e1000_phy.c44
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h17
-rw-r--r--drivers/net/igb/igb_ethtool.c97
-rw-r--r--drivers/net/igb/igb_main.c470
-rw-r--r--drivers/net/igbvf/igbvf.h1
-rw-r--r--drivers/net/igbvf/netdev.c61
-rw-r--r--drivers/net/ioc3-eth.c12
-rw-r--r--drivers/net/ipg.c14
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/au1k_ir.c14
-rw-r--r--drivers/net/irda/bfin_sir.h1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c4
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_sir.c824
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/irda/w83977af_ir.c38
-rw-r--r--drivers/net/isa-skeleton.c718
-rw-r--r--drivers/net/iseries_veth.c13
-rw-r--r--drivers/net/ixgb/ixgb.h11
-rw-r--r--drivers/net/ixgb/ixgb_main.c114
-rw-r--r--drivers/net/ixgbe/Makefile5
-rw-r--r--drivers/net/ixgbe/ixgbe.h63
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c24
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c347
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c21
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c219
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c46
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c779
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h72
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c731
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3600
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h174
-rw-r--r--drivers/net/ixp2000/ixpdev.c1
-rw-r--r--drivers/net/jazzsonic.c3
-rw-r--r--drivers/net/jme.c98
-rw-r--r--drivers/net/jme.h43
-rw-r--r--drivers/net/korina.c10
-rw-r--r--drivers/net/ks8851.c22
-rw-r--r--drivers/net/ks8851_mll.c12
-rw-r--r--drivers/net/ksz884x.c7338
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lasi_82596.c1
-rw-r--r--drivers/net/lib82596.c23
-rw-r--r--drivers/net/lib8390.c15
-rw-r--r--drivers/net/ll_temac_main.c28
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/lp486e.c16
-rw-r--r--drivers/net/mac8390.c633
-rw-r--r--drivers/net/mac89x0.c6
-rw-r--r--drivers/net/macb.c38
-rw-r--r--drivers/net/mace.c14
-rw-r--r--drivers/net/macmace.c46
-rw-r--r--drivers/net/macsonic.c36
-rw-r--r--drivers/net/macvlan.c117
-rw-r--r--drivers/net/macvtap.c804
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/cmd.c1
-rw-r--r--drivers/net/mlx4/cq.c1
-rw-r--r--drivers/net/mlx4/en_main.c1
-rw-r--r--drivers/net/mlx4/en_netdev.c1
-rw-r--r--drivers/net/mlx4/en_resources.c1
-rw-r--r--drivers/net/mlx4/en_rx.c9
-rw-r--r--drivers/net/mlx4/en_tx.c1
-rw-r--r--drivers/net/mlx4/eq.c1
-rw-r--r--drivers/net/mlx4/icm.c1
-rw-r--r--drivers/net/mlx4/intf.c2
-rw-r--r--drivers/net/mlx4/main.c6
-rw-r--r--drivers/net/mlx4/mcg.c1
-rw-r--r--drivers/net/mlx4/mr.c1
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/mlx4/qp.c1
-rw-r--r--drivers/net/mlx4/srq.c1
-rw-r--r--drivers/net/mv643xx_eth.c13
-rw-r--r--drivers/net/mvme147.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c202
-rw-r--r--drivers/net/myri_sbus.c8
-rw-r--r--drivers/net/natsemi.c8
-rw-r--r--drivers/net/ne.c2
-rw-r--r--drivers/net/ne2.c1
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netconsole.c1
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h12
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c16
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c195
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c53
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c12
-rw-r--r--drivers/net/netxen/netxen_nic_main.c277
-rw-r--r--drivers/net/ni5010.c4
-rw-r--r--drivers/net/ni52.c11
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c702
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/octeon/octeon_mgmt.c19
-rw-r--r--drivers/net/pasemi_mac.c3
-rw-r--r--drivers/net/pci-skeleton.c1029
-rw-r--r--drivers/net/pcmcia/3c574_cs.c9
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c12
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c11
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c19
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c8
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c89
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c88
-rw-r--r--drivers/net/pcnet32.c510
-rw-r--r--drivers/net/phy/broadcom.c9
-rw-r--r--drivers/net/phy/cicada.c1
-rw-r--r--drivers/net/phy/davicom.c1
-rw-r--r--drivers/net/phy/et1011c.c1
-rw-r--r--drivers/net/phy/fixed.c1
-rw-r--r--drivers/net/phy/icplus.c1
-rw-r--r--drivers/net/phy/lxt.c1
-rw-r--r--drivers/net/phy/marvell.c39
-rw-r--r--drivers/net/phy/mdio-bitbang.c1
-rw-r--r--drivers/net/phy/mdio-octeon.c1
-rw-r--r--drivers/net/phy/mdio_bus.c72
-rw-r--r--drivers/net/phy/phy.c5
-rw-r--r--drivers/net/phy/phy_device.c47
-rw-r--r--drivers/net/phy/qsemi.c1
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/plip.c1
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_generic.c123
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/pppol2tp.c6
-rw-r--r--drivers/net/pppox.c1
-rw-r--r--drivers/net/ps3_gelic_net.c5
-rw-r--r--drivers/net/ps3_gelic_wireless.c150
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1131
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1032
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1260
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1678
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2726
-rw-r--r--drivers/net/qlge/qlge.h446
-rw-r--r--drivers/net/qlge/qlge_dbg.c1185
-rw-r--r--drivers/net/qlge/qlge_ethtool.c57
-rw-r--r--drivers/net/qlge/qlge_main.c1204
-rw-r--r--drivers/net/qlge/qlge_mpi.c340
-rw-r--r--drivers/net/r6040.c43
-rw-r--r--drivers/net/r8169.c218
-rw-r--r--drivers/net/rionet.c1
-rw-r--r--drivers/net/rrunner.c5
-rw-r--r--drivers/net/s2io.c26
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c6
-rw-r--r--drivers/net/seeq8005.c1
-rw-r--r--drivers/net/sfc/efx.c20
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c12
-rw-r--r--drivers/net/sfc/falcon_boards.c58
-rw-r--r--drivers/net/sfc/falcon_xmac.c38
-rw-r--r--drivers/net/sfc/mcdi.c123
-rw-r--r--drivers/net/sfc/mcdi.h2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h206
-rw-r--r--drivers/net/sfc/mcdi_phy.c126
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/mtd.c6
-rw-r--r--drivers/net/sfc/net_driver.h18
-rw-r--r--drivers/net/sfc/nic.c15
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/qt202x_phy.c248
-rw-r--r--drivers/net/sfc/regs.h2
-rw-r--r--drivers/net/sfc/rx.c1
-rw-r--r--drivers/net/sfc/selftest.c39
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c31
-rw-r--r--drivers/net/sfc/tenxpress.c141
-rw-r--r--drivers/net/sfc/tx.c5
-rw-r--r--drivers/net/sgiseeq.c7
-rw-r--r--drivers/net/sh_eth.c13
-rw-r--r--drivers/net/sis190.c222
-rw-r--r--drivers/net/sis900.c9
-rw-r--r--drivers/net/skfp/ess.c2
-rw-r--r--drivers/net/skfp/skfddi.c37
-rw-r--r--drivers/net/skge.c219
-rw-r--r--drivers/net/sky2.c741
-rw-r--r--drivers/net/sky2.h10
-rw-r--r--drivers/net/slhc.c1
-rw-r--r--drivers/net/slip.c1
-rw-r--r--drivers/net/smc911x.c15
-rw-r--r--drivers/net/smc911x.h4
-rw-r--r--drivers/net/smc9194.c13
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smc91x.h42
-rw-r--r--drivers/net/smsc911x.c54
-rw-r--r--drivers/net/smsc9420.c12
-rw-r--r--drivers/net/sni_82596.c1
-rw-r--r--drivers/net/sonic.c13
-rw-r--r--drivers/net/spider_net.c10
-rw-r--r--drivers/net/starfire.c18
-rw-r--r--drivers/net/stmmac/Kconfig9
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h279
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)213
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c244
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)351
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c447
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c12
-rw-r--r--drivers/net/sun3_82586.c11
-rw-r--r--drivers/net/sun3lance.c3
-rw-r--r--drivers/net/sunbmac.c9
-rw-r--r--drivers/net/sundance.c10
-rw-r--r--drivers/net/sungem.c18
-rw-r--r--drivers/net/sunhme.c26
-rw-r--r--drivers/net/sunlance.c8
-rw-r--r--drivers/net/sunqe.c11
-rw-r--r--drivers/net/sunvnet.c7
-rw-r--r--drivers/net/tc35815.c28
-rw-r--r--drivers/net/tehuti.c161
-rw-r--r--drivers/net/tehuti.h31
-rw-r--r--drivers/net/tg3.c993
-rw-r--r--drivers/net/tg3.h165
-rw-r--r--drivers/net/tlan.c37
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c8
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/ibmtr.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c7
-rw-r--r--drivers/net/tokenring/madgemc.c1
-rw-r--r--drivers/net/tokenring/olympic.c7
-rw-r--r--drivers/net/tokenring/smctr.c1
-rw-r--r--drivers/net/tokenring/tms380tr.c13
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tsi108_eth.c24
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/de2104x.c164
-rw-r--r--drivers/net/tulip/de4x5.c18
-rw-r--r--drivers/net/tulip/dmfe.c121
-rw-r--r--drivers/net/tulip/eeprom.c54
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c213
-rw-r--r--drivers/net/tulip/uli526x.c73
-rw-r--r--drivers/net/tulip/winbond-840.c187
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c139
-rw-r--r--drivers/net/typhoon.c266
-rw-r--r--drivers/net/ucc_geth.c79
-rw-r--r--drivers/net/ucc_geth.h13
-rw-r--r--drivers/net/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/usb/Kconfig30
-rw-r--r--drivers/net/usb/Makefile3
-rw-r--r--drivers/net/usb/asix.c148
-rw-r--r--drivers/net/usb/catc.c11
-rw-r--r--drivers/net/usb/cdc-phonet.c1
-rw-r--r--drivers/net/usb/cdc_eem.c11
-rw-r--r--drivers/net/usb/cdc_ether.c30
-rw-r--r--drivers/net/usb/dm9601.c60
-rw-r--r--drivers/net/usb/gl620a.c1
-rw-r--r--drivers/net/usb/hso.c108
-rw-r--r--drivers/net/usb/int51x1.c18
-rw-r--r--drivers/net/usb/ipheth.c569
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/mcs7830.c257
-rw-r--r--drivers/net/usb/net1080.c110
-rw-r--r--drivers/net/usb/pegasus.c172
-rw-r--r--drivers/net/usb/pegasus.h6
-rw-r--r--drivers/net/usb/rndis_host.c25
-rw-r--r--drivers/net/usb/rtl8150.c13
-rw-r--r--drivers/net/usb/sierra_net.c1001
-rw-r--r--drivers/net/usb/smsc75xx.c1289
-rw-r--r--drivers/net/usb/smsc75xx.h421
-rw-r--r--drivers/net/usb/smsc95xx.c279
-rw-r--r--drivers/net/usb/usbnet.c239
-rw-r--r--drivers/net/veth.c20
-rw-r--r--drivers/net/via-rhine.c51
-rw-r--r--drivers/net/via-velocity.c67
-rw-r--r--drivers/net/virtio_net.c480
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/vxge/vxge-config.c1
-rw-r--r--drivers/net/vxge/vxge-config.h1
-rw-r--r--drivers/net/vxge/vxge-ethtool.c1
-rw-r--r--drivers/net/vxge/vxge-main.c25
-rw-r--r--drivers/net/wan/cosa.c10
-rw-r--r--drivers/net/wan/dscc4.c3
-rw-r--r--drivers/net/wan/farsync.c3
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_cisco.c9
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wan/hdlc_raw.c1
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c2
-rw-r--r--drivers/net/wan/hdlc_x25.c6
-rw-r--r--drivers/net/wan/hostess_sv11.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c1
-rw-r--r--drivers/net/wan/pc300_drv.c3
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/sealevel.c1
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wan/x25_asy.c1
-rw-r--r--drivers/net/wan/z85230.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c1
-rw-r--r--drivers/net/wimax/i2400m/driver.c18
-rw-r--r--drivers/net/wimax/i2400m/fw.c14
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h2
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wimax/i2400m/netdev.c1
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c1
-rw-r--r--drivers/net/wimax/i2400m/rx.c1
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c1
-rw-r--r--drivers/net/wimax/i2400m/sdio.c5
-rw-r--r--drivers/net/wimax/i2400m/tx.c1
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c1
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c17
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/adm8211.c28
-rw-r--r--drivers/net/wireless/airo.c38
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h18
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c208
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c177
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c127
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c38
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c42
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h86
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c200
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c442
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c186
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c865
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h37
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1474
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c77
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c129
-rw-r--r--drivers/net/wireless/ath/debug.h8
-rw-r--r--drivers/net/wireless/ath/regd.c6
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c217
-rw-r--r--drivers/net/wireless/b43/dma.h12
-rw-r--r--drivers/net/wireless/b43/lo.c1
-rw-r--r--drivers/net/wireless/b43/main.c112
-rw-r--r--drivers/net/wireless/b43/pcmcia.c1
-rw-r--r--drivers/net/wireless/b43/phy_a.c2
-rw-r--r--drivers/net/wireless/b43/phy_common.c45
-rw-r--r--drivers/net/wireless/b43/phy_common.h10
-rw-r--r--drivers/net/wireless/b43/phy_g.c1
-rw-r--r--drivers/net/wireless/b43/phy_lp.c78
-rw-r--r--drivers/net/wireless/b43/phy_n.c3036
-rw-r--r--drivers/net/wireless/b43/phy_n.h98
-rw-r--r--drivers/net/wireless/b43/pio.c18
-rw-r--r--drivers/net/wireless/b43/pio.h45
-rw-r--r--drivers/net/wireless/b43/sdio.c1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c744
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h100
-rw-r--r--drivers/net/wireless/b43legacy/dma.c21
-rw-r--r--drivers/net/wireless/b43legacy/dma.h10
-rw-r--r--drivers/net/wireless/b43legacy/leds.h2
-rw-r--r--drivers/net/wireless/b43legacy/main.c62
-rw-r--r--drivers/net/wireless/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/b43legacy/pio.c14
-rw-r--r--drivers/net/wireless/b43legacy/pio.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c18
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c22
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_geo.c1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c37
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c145
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c75
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c513
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c445
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1462
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h115
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c220
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c162
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c210
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c270
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c78
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c1
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c96
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/libertas/cmd.c23
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c22
-rw-r--r--drivers/net/wireless/libertas/debugfs.c1
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h9
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/if_cs.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/libertas/main.c82
-rw-r--r--drivers/net/wireless/libertas/mesh.c33
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/scan.c23
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c29
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c1
-rw-r--r--drivers/net/wireless/libertas_tf/main.c16
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c199
-rw-r--r--drivers/net/wireless/mwl8k.c2090
-rw-r--r--drivers/net/wireless/orinoco/fw.c1
-rw-r--r--drivers/net/wireless/orinoco/hw.c22
-rw-r--r--drivers/net/wireless/orinoco/hw.h2
-rw-r--r--drivers/net/wireless/orinoco/main.c8
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c9
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/scan.c1
-rw-r--r--drivers/net/wireless/orinoco/wext.c7
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/main.c52
-rw-r--r--drivers/net/wireless/p54/p54.h8
-rw-r--r--drivers/net/wireless/p54/p54pci.c85
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/p54/p54usb.c4
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h1
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c1
-rw-r--r--drivers/net/wireless/ray_cs.c11
-rw-r--r--drivers/net/wireless/rndis_wlan.c429
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig71
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c49
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c49
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h16
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c221
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c104
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c378
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h102
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c25
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c88
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c54
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c50
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c39
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c28
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.h2
-rw-r--r--drivers/net/wireless/wl12xx/Makefile4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c70
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c84
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c27
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c376
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c197
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h50
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c103
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c142
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h174
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c51
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c213
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c824
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c13
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c159
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c284
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.h31
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c71
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h36
-rw-r--r--drivers/net/wireless/zd1201.c15
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c141
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c39
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c16
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/net/xilinx_emaclite.c385
-rw-r--r--drivers/net/xtsonic.c3
-rw-r--r--drivers/net/yellowfin.c174
-rw-r--r--drivers/net/znet.c4
886 files changed, 77722 insertions, 21214 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 4d4cad393dce..3ea42ff17657 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -117,7 +117,6 @@ static const char version[] =
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
@@ -812,7 +811,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
outb(RX_PROM, RX_CMD);
inb(RX_STATUS);
- } else if (dev->mc_list || dev->flags & IFF_ALLMULTI) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
/* Multicast or all multicast is the same */
outb(RX_MULT, RX_CMD);
inb(RX_STATUS); /* Clear status. */
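
The 3c501 change above is the conversion repeated across most drivers in this series: open-coded walks of dev->mc_list guarded by dev->mc_count are replaced with the netdev_mc_empty()/netdev_mc_count() tests and the netdev_for_each_mc_addr() iterator. A minimal before/after sketch for a hypothetical driver, not taken from this patch (foo_write_mc_addr() is an invented hook for programming one filter slot):

/* Old style: walk dev->mc_list by hand. */
static void foo_set_mc_list_old(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	int i;

	for (i = 0; i < dev->mc_count; i++) {
		foo_write_mc_addr(dev, i, dmi->dmi_addr);	/* invented helper */
		dmi = dmi->next;
	}
}

/* New style: let the netdev_mc_* helpers do the list bookkeeping. */
static void foo_set_mc_list_new(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	int i = 0;

	if (netdev_mc_empty(dev))
		return;

	netdev_for_each_mc_addr(dmi, dev)
		foo_write_mc_addr(dev, i++, dmi->dmi_addr);	/* invented helper */
}
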
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9257d7ce0378..29b8d1d63bde 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -102,12 +102,12 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
unsigned long flags;
@@ -1229,11 +1229,10 @@ static void elp_set_mc_list(struct net_device *dev)
/* send a "load multicast list" command to the board, max 10 addrs/cmd */
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * dev->mc_count;
- for (i = 0; i < dev->mc_count; i++) {
- memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
@@ -1244,7 +1243,7 @@ static void elp_set_mc_list(struct net_device *dev)
TIMEOUT_MSG(__LINE__);
}
}
- if (dev->mc_count)
+ if (!netdev_mc_empty(dev))
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
else /* num_addrs == 0 */
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc231153e55..b32b7a1710b7 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,8 +56,8 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -734,8 +734,7 @@ static void init_82586_mem(struct net_device *dev)
memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
/* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
- sizeof(dev->dev_addr));
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
/* The Tx-block list is written as needed. We just set up the values. */
lp->tx_cmd_link = IDLELOOP + 4;
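
The 3c507 hunk above also fixes the length of the station-address copy: on kernels of this vintage dev->dev_addr is a pointer rather than a fixed-size array, so sizeof(dev->dev_addr) yields the pointer size (4 or 8 bytes), not the 6-byte Ethernet address length, and ETH_ALEN from the newly included <linux/if_ether.h> is the correct count. Before/after sketch of the same fix (lp->base and SA_OFFSET follow the 3c507 hunk above):

/* Before: copies sizeof(a pointer) bytes -- not a MAC address. */
memcpy_toio(lp->base + SA_OFFSET, dev->dev_addr, sizeof(dev->dev_addr));

/* After: copy exactly one Ethernet hardware address. */
memcpy_toio(lp->base + SA_OFFSET, dev->dev_addr, ETH_ALEN);
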
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9d85efce5916..ab9bb3c52002 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -76,7 +76,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -1111,12 +1110,14 @@ set_multicast_list(struct net_device *dev)
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
- pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
@@ -1124,7 +1125,7 @@ set_multicast_list(struct net_device *dev)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
- else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 063b049ffe55..2e17837be546 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -65,7 +65,6 @@ static int max_interrupt_work = 20;
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
@@ -1536,7 +1535,7 @@ static void set_rx_mode(struct net_device *dev)
pr_debug("%s: Setting promiscuous mode.\n",
dev->name);
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 27d80ca5e4c0..1719079cc498 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -99,7 +99,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mca-legacy.h>
@@ -625,8 +624,8 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -771,7 +770,7 @@ static int init586(struct net_device *dev)
* Multicast setup
*/
- if (dev->mc_count) {
+ if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
@@ -787,10 +786,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
- for (i = 0; i < num_addrs; i++) {
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 36c4191e7bca..5c07b147ec99 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1526,32 +1526,29 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
- dev->mc_count > 10)
+ netdev_mc_count(dev) > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc=dev->mc_list;
-
- int i;
+ struct dev_mc_list *dmc;
if(retry==0)
lp->mc_list_valid = 0;
if(!lp->mc_list_valid)
{
block[1]=0;
- block[0]=dev->mc_count;
+ block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<dev->mc_count;i++)
- {
+ netdev_for_each_mc_addr(dmc, dev) {
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
- dmc=dmc->next;
}
- if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815d..5f92fdbe66e2 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -77,7 +77,6 @@ static int vortex_debug = 1;
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/mii.h>
@@ -90,6 +89,7 @@ static int vortex_debug = 1;
#include <linux/eisa.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/gfp.h>
#include <asm/irq.h> /* For nr_irqs only. */
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
};
-static struct pci_device_id vortex_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
@@ -2970,7 +2970,7 @@ static void set_rx_mode(struct net_device *dev)
if (vortex_debug > 3)
pr_notice("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
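
The vortex_pci_tbl change is another conversion that recurs through this series (8139cp and 8139too below get the same treatment): PCI ID tables declared as plain struct pci_device_id arrays become DEFINE_PCI_DEVICE_TABLE() tables. At this kernel vintage the macro roughly expands to a const array annotated __devinitconst, so the initializer itself is unchanged. Usage sketch with invented IDs:

/* Before */
static struct pci_device_id foo_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};

/* After: same initializer; const (and __devinitconst) come from the macro. */
static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);
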
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index b1e5764628c6..500e135723bd 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -26,7 +26,6 @@
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
@@ -595,9 +594,8 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -611,9 +609,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9e..a09e6ce3eaa0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -46,6 +46,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "8139cp"
#define DRV_VERSION "1.3"
#define DRV_RELDATE "Mar 22, 2004"
@@ -62,6 +64,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
@@ -104,8 +107,6 @@ static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
-#define PFX DRV_NAME ": "
-
#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK)
@@ -394,7 +395,7 @@ static int cp_get_eeprom(struct net_device *dev,
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
@@ -470,9 +471,8 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
u32 status, u32 len)
{
- if (netif_msg_rx_err (cp))
- pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
- cp->dev->name, rx_tail, status, len);
+ netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
cp->dev->stats.rx_errors++;
if (status & RxErrFrame)
cp->dev->stats.rx_frame_errors++;
@@ -545,9 +545,8 @@ rx_status_loop:
goto rx_next;
}
- if (netif_msg_rx_status(cp))
- pr_debug("%s: rx slot %d status 0x%x len %d\n",
- dev->name, rx_tail, status, len);
+ netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
new_skb = netdev_alloc_skb_ip_align(dev, buflen);
if (!new_skb) {
@@ -621,9 +620,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
if (!status || (status == 0xFFFF))
return IRQ_NONE;
- if (netif_msg_intr(cp))
- pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
- dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+ netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+ status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
@@ -654,8 +652,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
- pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+ status, pci_status);
/* TODO: reset hardware */
}
@@ -700,9 +698,8 @@ static void cp_tx (struct cp_private *cp)
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
- if (netif_msg_tx_err(cp))
- pr_debug("%s: tx err, status 0x%x\n",
- cp->dev->name, status);
+ netif_dbg(cp, tx_err, cp->dev,
+ "tx err, status 0x%x\n", status);
cp->dev->stats.tx_errors++;
if (status & TxOWC)
cp->dev->stats.tx_window_errors++;
@@ -717,8 +714,8 @@ static void cp_tx (struct cp_private *cp)
((status >> TxColCntShift) & TxColCntMask);
cp->dev->stats.tx_packets++;
cp->dev->stats.tx_bytes += skb->len;
- if (netif_msg_tx_done(cp))
- pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -752,8 +749,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->lock, intr_flags);
- pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -878,9 +874,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
wmb();
}
cp->tx_head = entry;
- if (netif_msg_tx_queued(cp))
- pr_debug("%s: tx queued, slot %d, skblen %d\n",
- dev->name, entry, skb->len);
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -899,7 +894,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
@@ -909,7 +904,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -918,8 +913,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
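
For reference, the filter-bit computation that the converted loop above feeds is unchanged by this patch: the upper six bits of the Ethernet CRC of each multicast address select one of 64 bits spread across the two 32-bit hash registers. A standalone sketch of that logic, assuming <linux/crc32.h> and <linux/if_ether.h> (foo_hash_mc_addr() is an invented name):

static void foo_hash_mc_addr(u32 mc_filter[2], const u8 *addr)
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
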
@@ -993,7 +987,7 @@ static void cp_reset_hw (struct cp_private *cp)
schedule_timeout_uninterruptible(10);
}
- pr_err("%s: hardware reset timeout\n", cp->dev->name);
+ netdev_err(cp->dev, "hardware reset timeout\n");
}
static inline void cp_start_hw (struct cp_private *cp)
@@ -1160,8 +1154,7 @@ static int cp_open (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
int rc;
- if (netif_msg_ifup(cp))
- pr_debug("%s: enabling interface\n", dev->name);
+ netif_dbg(cp, ifup, dev, "enabling interface\n");
rc = cp_alloc_rings(cp);
if (rc)
@@ -1195,8 +1188,7 @@ static int cp_close (struct net_device *dev)
napi_disable(&cp->napi);
- if (netif_msg_ifdown(cp))
- pr_debug("%s: disabling interface\n", dev->name);
+ netif_dbg(cp, ifdown, dev, "disabling interface\n");
spin_lock_irqsave(&cp->lock, flags);
@@ -1219,9 +1211,9 @@ static void cp_tx_timeout(struct net_device *dev)
unsigned long flags;
int rc;
- pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
- dev->name, cpr8(Cmd), cpr16(CpCmd),
- cpr16(IntrStatus), cpr16(IntrMask));
+ netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+ cpr8(Cmd), cpr16(CpCmd),
+ cpr16(IntrStatus), cpr16(IntrMask));
spin_lock_irqsave(&cp->lock, flags);
@@ -1874,8 +1866,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
dev_info(&pdev->dev,
- "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
- pdev->vendor, pdev->device, pdev->revision);
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+ pdev->vendor, pdev->device, pdev->revision);
return -ENODEV;
}
@@ -1933,14 +1925,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable DMA configuration, aborting.\n");
+ "No usable DMA configuration, aborting\n");
goto err_out_res;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable consistent DMA configuration, "
- "aborting.\n");
+ "No usable consistent DMA configuration, aborting\n");
goto err_out_res;
}
}
@@ -1952,7 +1943,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (!regs) {
rc = -EIO;
dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
- (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pci_resource_len(pdev, 1),
(unsigned long long)pciaddr);
goto err_out_res;
}
@@ -1990,11 +1981,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_iomap;
- pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
pci_set_drvdata(pdev, dev);
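
The 8139cp conversion above (and the 8139too one that follows) also shows the logging clean-up applied throughout the series: a KBUILD_MODNAME-based pr_fmt() replaces the hand-rolled PFX prefix, and messages that interpolated dev->name by hand move to the netdev_*()/netif_*() helpers, which print the device name themselves and, in the netif_dbg() case, fold in the netif_msg_* level test. A hedged sketch of the equivalent conversion in a hypothetical driver (struct foo_private and foo_report_errors() are invented; priv->msg_enable is assumed to hold the usual netif_msg bitmask):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* before any #include */

static void foo_report_errors(struct foo_private *priv,
			      struct net_device *dev, u32 status)
{
	/* Old style:
	 *	if (netif_msg_rx_err(priv))
	 *		pr_debug("%s: rx err, status 0x%x\n", dev->name, status);
	 *	pr_err("%s: PCI bus error, status 0x%x\n", dev->name, status);
	 */

	/* New style: the helpers prefix the device name; netif_dbg()
	 * also checks the rx_err message level itself. */
	netif_dbg(priv, rx_err, dev, "rx err, status 0x%x\n", status);
	netdev_err(dev, "PCI bus error, status 0x%x\n", status);
}
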
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daabd..f0d23de32967 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -89,6 +89,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "8139too"
#define DRV_VERSION "0.9.28"
@@ -108,10 +110,10 @@
#include <linux/crc32.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/gfp.h>
#include <asm/irq.h>
#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
-#define PFX DRV_NAME ": "
/* Default Message level */
#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -130,9 +132,9 @@
# define assert(expr) do {} while (0)
#else
# define assert(expr) \
- if(unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
}
#endif
@@ -231,7 +233,7 @@ static const struct {
};
-static struct pci_device_id rtl8139_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
@@ -957,7 +959,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
- pr_info("8139too: OQO Model 2 detected. Forcing PIO\n");
+ pr_info("OQO Model 2 detected. Forcing PIO\n");
use_io = 1;
}
@@ -1010,21 +1012,19 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
tp->mii.reg_num_mask = 0x1f;
/* dev is fully set up and ready to use now */
- pr_debug("about to register device named %s (%p)...\n", dev->name, dev);
+ pr_debug("about to register device named %s (%p)...\n",
+ dev->name, dev);
i = register_netdev (dev);
if (i) goto err_out;
pci_set_drvdata (pdev, dev);
- pr_info("%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- board_info[ent->driver_data].name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ board_info[ent->driver_data].name,
+ dev->base_addr, dev->dev_addr, dev->irq);
- pr_debug("%s: Identified 8139 chip type '%s'\n",
- dev->name, rtl_chip_info[tp->chipset].name);
+ netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
+ rtl_chip_info[tp->chipset].name);
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
@@ -1037,13 +1037,12 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
- pr_info("%s: MII transceiver %d status 0x%4.4x advertising %4.4x.\n",
- dev->name, phy, mii_status, advertising);
+ netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
+ phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
- pr_info("%s: No MII transceivers found! Assuming SYM transceiver.\n",
- dev->name);
+ netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
tp->phys[0] = 32;
}
} else
@@ -1062,15 +1061,15 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
tp->mii.full_duplex = full_duplex[board_idx];
if (tp->mii.full_duplex) {
- pr_info("%s: Media type forced to Full Duplex.\n", dev->name);
+ netdev_info(dev, "Media type forced to Full Duplex\n");
/* Changing the MII-advertised media because might prevent
re-connection. */
tp->mii.force_media = 1;
}
if (tp->default_port) {
- pr_info(" Forcing %dMbps %s-duplex operation.\n",
- (option & 0x20 ? 100 : 10),
- (option & 0x10 ? "full" : "half"));
+ netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
mdio_write(dev, tp->phys[0], 0,
((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
@@ -1330,12 +1329,12 @@ static int rtl8139_open (struct net_device *dev)
rtl8139_hw_start (dev);
netif_start_queue (dev);
- if (netif_msg_ifup(tp))
- pr_debug("%s: rtl8139_open() ioaddr %#llx IRQ %d"
- " GP Pins %2.2x %s-duplex.\n", dev->name,
- (unsigned long long)pci_resource_start (tp->pci_dev, 1),
- dev->irq, RTL_R8 (MediaStatus),
- tp->mii.full_duplex ? "full" : "half");
+ netif_dbg(tp, ifup, dev,
+ "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
+ __func__,
+ (unsigned long long)pci_resource_start (tp->pci_dev, 1),
+ dev->irq, RTL_R8 (MediaStatus),
+ tp->mii.full_duplex ? "full" : "half");
rtl8139_start_thread(tp);
@@ -1393,7 +1392,7 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
}
- pr_debug("init buffer addresses\n");
+ netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1555,14 +1554,11 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
tp->mii.full_duplex = duplex;
if (mii_lpa) {
- pr_info("%s: Setting %s-duplex based on MII #%d link"
- " partner ability of %4.4x.\n",
- dev->name,
- tp->mii.full_duplex ? "full" : "half",
- tp->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
+ tp->mii.full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
} else {
- pr_info("%s: media is unconnected, link down, or incompatible connection\n",
- dev->name);
+ netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
}
#if 0
RTL_W8 (Cfg9346, Cfg9346_Unlock);
@@ -1576,13 +1572,12 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
rtl8139_tune_twister (dev, tp);
- pr_debug("%s: Media selection tick, Link partner %4.4x.\n",
- dev->name, RTL_R16 (NWayLPAR));
- pr_debug("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
- dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus));
- pr_debug("%s: Chip config %2.2x %2.2x.\n",
- dev->name, RTL_R8 (Config0),
- RTL_R8 (Config1));
+ netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
+ RTL_R16(NWayLPAR));
+ netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
+ RTL_R16(IntrMask), RTL_R16(IntrStatus));
+ netdev_dbg(dev, "Chip config %02x %02x\n",
+ RTL_R8(Config0), RTL_R8(Config1));
}
static void rtl8139_thread (struct work_struct *work)
@@ -1640,17 +1635,17 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
- pr_debug("%s: Transmit timeout, status %2.2x %4.4x %4.4x media %2.2x.\n",
- dev->name, RTL_R8 (ChipCmd),
- RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus));
+ netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
+ RTL_R8(ChipCmd), RTL_R16(IntrStatus),
+ RTL_R16(IntrMask), RTL_R8(MediaStatus));
/* Emit info to figure out what went wrong. */
- pr_debug("%s: Tx queue start entry %ld dirty entry %ld.\n",
- dev->name, tp->cur_tx, tp->dirty_tx);
+ netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
+ tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
- pr_debug("%s: Tx descriptor %d is %8.8lx.%s\n",
- dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
- i == tp->dirty_tx % NUM_TX_DESC ?
- " (queue head)" : "");
+ netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+ i, RTL_R32(TxStatus0 + (i * 4)),
+ i == tp->dirty_tx % NUM_TX_DESC ?
+ " (queue head)" : "");
tp->xstats.tx_timeouts++;
@@ -1729,9 +1724,8 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
netif_stop_queue (dev);
spin_unlock_irqrestore(&tp->lock, flags);
- if (netif_msg_tx_queued(tp))
- pr_debug("%s: Queued Tx packet size %u to slot %d.\n",
- dev->name, len, entry);
+ netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
+ len, entry);
return NETDEV_TX_OK;
}
@@ -1760,9 +1754,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
/* Note: TxCarrierLost is always asserted at 100mbps. */
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was an major error, log it. */
- if (netif_msg_tx_err(tp))
- pr_debug("%s: Transmit error, Tx status %8.8x.\n",
- dev->name, txstatus);
+ netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
+ txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
dev->stats.tx_aborted_errors++;
@@ -1792,8 +1785,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
#ifndef RTL8139_NDEBUG
if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
- pr_err("%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += NUM_TX_DESC;
}
#endif /* RTL8139_NDEBUG */
@@ -1816,14 +1809,13 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
int tmp_work;
#endif
- if (netif_msg_rx_err (tp))
- pr_debug("%s: Ethernet frame had errors, status %8.8x.\n",
- dev->name, rx_status);
+ netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
+ rx_status);
dev->stats.rx_errors++;
if (!(rx_status & RxStatusOK)) {
if (rx_status & RxTooLong) {
- pr_debug("%s: Oversized Ethernet frame, status %4.4x!\n",
- dev->name, rx_status);
+ netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
+ rx_status);
/* A.C.: The chip hangs here. */
}
if (rx_status & (RxBadSymbol | RxBadAlign))
@@ -1855,7 +1847,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- pr_warning(PFX "rx stop wait too long\n");
+ netdev_warn(dev, "rx stop wait too long\n");
/* restart receive */
tmp_work = 200;
while (--tmp_work > 0) {
@@ -1866,7 +1858,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- pr_warning(PFX "tx/rx enable wait too long\n");
+ netdev_warn(dev, "tx/rx enable wait too long\n");
/* and reinitialize all rx related registers */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
@@ -1877,7 +1869,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
RTL_W32 (RxConfig, tp->rx_config);
tp->cur_rx = 0;
- pr_debug("init buffer addresses\n");
+ netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1931,10 +1923,9 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
unsigned int cur_rx = tp->cur_rx;
unsigned int rx_size = 0;
- pr_debug("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx,
- RTL_R16 (RxBufAddr),
- RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+ netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, (u16)cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
while (netif_running(dev) && received < budget &&
(RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
@@ -1950,19 +1941,12 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;
- if (netif_msg_rx_status(tp))
- pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x,"
- " cur %4.4x.\n", dev->name, rx_status,
- rx_size, cur_rx);
+ netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
+ __func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
- {
- int i;
- pr_debug("%s: Frame contents ", dev->name);
- for (i = 0; i < 70; i++)
- pr_cont(" %2.2x",
- rx_ring[ring_offset + i]);
- pr_cont(".\n");
- }
+ print_hex_dump(KERN_DEBUG, "Frame contents: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ &rx_ring[ring_offset], 70, true);
#endif
/* Packet copy from FIFO still in progress.
@@ -1973,14 +1957,11 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
if (!tp->fifo_copy_timeout)
tp->fifo_copy_timeout = jiffies + 2;
else if (time_after(jiffies, tp->fifo_copy_timeout)) {
- pr_debug("%s: hung FIFO. Reset.", dev->name);
+ netdev_dbg(dev, "hung FIFO. Reset\n");
rx_size = 0;
goto no_early_rx;
}
- if (netif_msg_intr(tp)) {
- pr_debug("%s: fifo copy in progress.",
- dev->name);
- }
+ netif_dbg(tp, intr, dev, "fifo copy in progress\n");
tp->xstats.early_rx++;
break;
}
@@ -2021,8 +2002,7 @@ no_early_rx:
netif_receive_skb (skb);
} else {
if (net_ratelimit())
- pr_warning("%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
received++;
@@ -2036,10 +2016,9 @@ no_early_rx:
if (unlikely(!received || rx_size == 0xfff0))
rtl8139_isr_ack(tp);
- pr_debug("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- RTL_R16 (RxBufAddr),
- RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+ netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
tp->cur_rx = cur_rx;
@@ -2060,8 +2039,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
void __iomem *ioaddr,
int status, int link_changed)
{
- pr_debug("%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);
assert (dev != NULL);
assert (tp != NULL);
@@ -2089,8 +2067,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
- pr_err("%s: PCI Bus error %4.4x.\n",
- dev->name, pci_cmd_status);
+ netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
}
}
@@ -2183,8 +2160,8 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
out:
spin_unlock (&tp->lock);
- pr_debug("%s: exiting interrupt, intr_status=%#4.4x.\n",
- dev->name, RTL_R16 (IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
+ RTL_R16(IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -2233,9 +2210,8 @@ static int rtl8139_close (struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&tp->napi);
- if (netif_msg_ifdown(tp))
- pr_debug("%s: Shutting down ethercard, status was 0x%4.4x.\n",
- dev->name, RTL_R16 (IntrStatus));
+ netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
+ RTL_R16(IntrStatus));
spin_lock_irqsave (&tp->lock, flags);
@@ -2509,11 +2485,11 @@ static void __set_rx_mode (struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
- pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
- dev->name, dev->flags, RTL_R32 (RxConfig));
+ netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
+ dev->flags, RTL_R32(RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -2521,7 +2497,7 @@ static void __set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -2530,8 +2506,7 @@ static void __set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
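
Both the 8139cp and 8139too hunks replace the open-coded walk of dev->mc_list/dev->mc_count with the netdev_for_each_mc_addr() iterator (and netdev_mc_count()/netdev_mc_empty() for the count tests) while keeping the same CRC-based 64-bit hash filter. A self-contained sketch of the resulting pattern; build_hash_filter is a hypothetical helper name, the body mirrors the hunks:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void build_hash_filter(struct net_device *dev, u32 mc_filter[2])
{
	struct dev_mc_list *mclist;

	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(mclist, dev) {
		/* top 6 bits of the CRC select one of 64 hash-filter bits */
		int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}
}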
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 1663bc9e45de..56e68db48861 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -45,7 +45,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -53,6 +52,7 @@
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -1505,7 +1505,7 @@ static void set_multicast_list(struct net_device *dev)
int config = 0, cnt;
DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
@@ -1533,7 +1533,7 @@ static void set_multicast_list(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT)
{
cnt = MAX_MC_CNT;
@@ -1541,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1550,13 +1550,16 @@ static void set_multicast_list(struct net_device *dev)
return;
cmd = &lp->mc_cmd;
cmd->cmd.command = CmdMulticastList;
- cmd->mc_cnt = dev->mc_count * 6;
+ cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, dmi->dmi_addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
+ cp += ETH_ALEN;
}
i596_add_cmd(dev, &cmd->cmd);
}
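
The 82596 conversion differs slightly from the hash-filter drivers: addresses are copied verbatim into a bounded command buffer, so the count is clamped to the hardware limit first and the iterator is broken out of once that many entries have been copied. A sketch of that bounded copy, assuming a hypothetical helper outside the driver:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int copy_mc_addrs(struct net_device *dev, u8 *buf, int max_cnt)
{
	struct dev_mc_list *dmi;
	int cnt = min(netdev_mc_count(dev), max_cnt);
	int left = cnt;

	netdev_for_each_mc_addr(dmi, dev) {
		if (!left--)
			break;
		memcpy(buf, dmi->dmi_addr, ETH_ALEN);
		buf += ETH_ALEN;
	}
	return cnt;	/* addresses copied, ETH_ALEN bytes each */
}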
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e58a65391ad2..7b832c727f87 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,18 @@ config MACVLAN
To compile this driver as a module, choose M here: the module
will be called macvlan.
+config MACVTAP
+ tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ depends on MACVLAN
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+ can be added in the same way as a macvlan device, using 'type
+ macvlan', and then be accessed through the tap user space interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macvtap.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -868,8 +880,8 @@ config BFIN_RX_DESC_NUM
Set the number of buffer packets used in driver.
config BFIN_MAC_RMII
- bool "RMII PHY Interface (EXPERIMENTAL)"
- depends on BFIN_MAC && EXPERIMENTAL
+ bool "RMII PHY Interface"
+ depends on BFIN_MAC
default y if BFIN527_EZKIT
default n if BFIN537_STAMP
help
@@ -895,7 +907,7 @@ config SMC91X
select CRC32
select MII
depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
- MIPS || BLACKFIN || MN10300
+ MIPS || BLACKFIN || MN10300 || COLDFIRE
help
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -920,7 +932,7 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
- depends on ARM && ARCH_DAVINCI
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
select PHYLIB
help
This driver supports TI's DaVinci Ethernet .
@@ -983,6 +995,14 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config GRETH
+ tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
+ depends on SPARC
+ select PHYLIB
+ select CRC32
+ help
+ Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
+
config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
@@ -1368,6 +1388,17 @@ config AC3200
To compile this driver as a module, choose M here. The module
will be called ac3200.
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on NET_PCI && PCI
+ select MII
+ select CRC32
+ help
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
config APRICOT
tristate "Apricot Xen-II on board Ethernet"
depends on NET_PCI && ISA
@@ -1883,7 +1914,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1939,6 +1971,7 @@ config ATL2
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
depends on PPC32 || MICROBLAZE
+ select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
@@ -2346,6 +2379,7 @@ config GELIC_NET
config GELIC_WIRELESS
bool "PS3 Wireless support"
+ depends on WLAN
depends on GELIC_NET
select WIRELESS_EXT
help
@@ -2355,19 +2389,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config GELIC_WIRELESS_OLD_PSK_INTERFACE
- bool "PS3 Wireless private PSK interface (OBSOLETE)"
- depends on GELIC_WIRELESS
- help
- This option retains the obsolete private interface to pass
- the PSK from user space programs to the driver. The PSK
- stands for 'Pre Shared Key' and is used for WPA[2]-PSK
- (WPA-Personal) environment.
- If WPA[2]-PSK is used and you need to use old programs that
- support only this old interface, say Y. Otherwise N.
-
- If unsure, say N.
-
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
depends on FSL_SOC
@@ -2561,6 +2582,31 @@ config CHELSIO_T3
To compile this driver as a module, choose M here: the module
will be called cxgb3.
+config CHELSIO_T4_DEPENDS
+ tristate
+ depends on PCI && INET
+ default y
+
+config CHELSIO_T4
+ tristate "Chelsio Communications T4 Ethernet support"
+ depends on CHELSIO_T4_DEPENDS
+ select FW_LOADER
+ select MDIO
+ help
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called cxgb4.
+
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS && INET && SPARSEMEM
@@ -2616,6 +2662,28 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
@@ -2754,6 +2822,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ help
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da9..12b280afdd51 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,10 +14,12 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
@@ -95,6 +97,7 @@ obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -148,6 +151,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_PPP) += ppp_generic.o
@@ -167,6 +171,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
+obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
@@ -246,6 +251,7 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ETHOC) += ethoc.o
+obj-$(CONFIG_GRETH) += greth.o
obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
@@ -267,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/
obj-$(CONFIG_USB_HSO) += usb/
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
+obj-$(CONFIG_USB_IPHETH) += usb/
obj-y += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index b7ec0368d7e8..ed5e9742be2c 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -46,7 +46,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -603,9 +602,8 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -619,9 +617,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a994753..97a3dfd94dfa 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -67,6 +67,7 @@
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
+#include <linux/slab.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
@@ -134,7 +135,7 @@
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
-static struct pci_device_id acenic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
@@ -2845,7 +2846,7 @@ static void ace_set_multicast_list(struct net_device *dev)
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
- if ((dev->mc_count) && !(ap->mcast_all)) {
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc75..8d58f0a8f42f 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -73,7 +73,6 @@ Revision History:
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -113,7 +112,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static struct pci_device_id amd8111e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1175,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
- printk("************Driver bug! \
- interrupt while in poll\n");
+ printk("************Driver bug! interrupt while in poll\n");
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
@@ -1378,28 +1376,28 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list* mc_ptr;
+ struct dev_mc_list *mc_ptr;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
- int i,bit_num;
+ int bit_num;
+
if(dev->flags & IFF_PROMISC){
writel( VAL2 | PROM, lp->mmio + CMD2);
return;
}
else
writel( PROM, lp->mmio + CMD2);
- if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
- lp->mc_list = dev->mc_list;
lp->options |= OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return;
}
- if( dev->mc_count == 0 ){
+ if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
- lp->mc_list = NULL;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscous mode */
@@ -1408,10 +1406,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
}
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
- lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
- i++, mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, dev) {
bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index 28c60a71ed50..ac36eb6981e3 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -789,7 +789,6 @@ struct amd8111e_priv{
char opened;
struct net_device_stats stats;
unsigned int drv_rx_errors;
- struct dev_mc_list* mc_list;
struct amd8111e_coalesce_conf coal_conf;
struct ipg_info ipg_data;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 73b38c204eb9..6f8d6206b5c4 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -56,7 +56,6 @@ static const char *version =
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index eb0448b03f41..79636ee35829 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -31,6 +31,7 @@
#include <linux/ip.h>
#include <linux/atalk.h>
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include <net/route.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dbfbd3b7ff86..6af65b656f31 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -215,7 +215,6 @@ static int dma;
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -228,6 +227,7 @@ static int dma;
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/dma.h>
@@ -1125,7 +1125,6 @@ struct net_device * __init ltpc_probe(void)
printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
dev->netdev_ops = &ltpc_netdev;
- dev->mc_list = NULL;
dev->base_addr = io;
dev->irq = irq;
dev->dma = dma;
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8ea9c7545c12..705e6ce2eb90 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index e6afab2455b1..9efbbbae47ca 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/bootmem.h>
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 66bcbbb6babc..355797f70048 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -27,6 +27,7 @@
*/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index db08fc24047a..0402da30a4ed 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754d..2c712af6c265 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
@@ -144,7 +143,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct pci_device_id com20020pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 0a74f21409c5..c9e459400ff9 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 28dea518d554..4cb401813b7e 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/bootmem.h>
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 112e230cb13d..f3b46f71e293 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -30,6 +30,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <linux/arcdevice.h>
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 06f8fa2f8f2f..f81db4070a57 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -24,6 +24,7 @@
* **********************
*/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 745530651c45..b71431aae084 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -23,6 +23,7 @@
*
* **********************
*/
+#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b1..fa1a2354f5f9 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -40,7 +40,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
@@ -123,9 +122,7 @@ static void ariadne_reset(struct net_device *dev);
static irqreturn_t ariadne_interrupt(int irq, void *data);
static int ariadne_close(struct net_device *dev);
static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
-#ifdef HAVE_MULTICAST
static void set_multicast_list(struct net_device *dev);
-#endif
static void memcpyw(volatile u_short *dest, u_short *src, int len)
@@ -821,7 +818,7 @@ static void set_multicast_list(struct net_device *dev)
lance->RDP = PROM; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer filtering. */
memset(multicast_table, (num_addrs == 0) ? 0 : -1,
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e6b67b..39e1c0d39476 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
tristate "Nuvoton w90p910 Ethernet support"
depends on ARM && ARCH_W90X900
select PHYLIB
+ select MII
help
Say Y here if you want to use built-in Ethernet ports
on w90p910 processor.
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 164b37e85eea..f1f58c5e27bf 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -351,13 +351,13 @@ static struct net_device_stats *am79c961_getstats (struct net_device *dev)
return &priv->stats;
}
-static void am79c961_mc_hash(struct dev_mc_list *dmi, unsigned short *hash)
+static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
- if (dmi->dmi_addrlen == ETH_ALEN && dmi->dmi_addr[0] & 0x01) {
+ if (addr[0] & 0x01) {
int idx, bit;
u32 crc;
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, addr);
idx = crc >> 30;
bit = (crc >> 26) & 15;
@@ -387,8 +387,8 @@ static void am79c961_setmulticastlist (struct net_device *dev)
memset(multi_hash, 0x00, sizeof(multi_hash));
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
- am79c961_mc_hash(dmi, multi_hash);
+ netdev_for_each_mc_addr(dmi, dev)
+ am79c961_mc_hash(dmi->dmi_addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -680,7 +680,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
#endif
};
-static int __init am79c961_probe(struct platform_device *pdev)
+static int __devinit am79c961_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index c8bc60a7040c..aed5b5479b50 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -27,6 +27,7 @@
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -558,14 +559,11 @@ static void at91ether_sethashtable(struct net_device *dev)
{
struct dev_mc_list *curr;
unsigned long mc_filter[2];
- unsigned int i, bitnr;
+ unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
- if (!curr) break; /* unexpected end of list */
-
+ netdev_for_each_mc_addr(curr, dev) {
bitnr = hash_get_index(curr->dmi_addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -592,7 +590,7 @@ static void at91ether_set_multicast_list(struct net_device *dev)
at91_emac_write(AT91_EMAC_HSH, -1);
at91_emac_write(AT91_EMAC_HSL, -1);
cfg |= AT91_EMAC_MTI;
- } else if (dev->mc_count > 0) { /* Enable specific multicasts */
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
at91ether_sethashtable(dev);
cfg |= AT91_EMAC_MTI;
} else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895c..6995169d285a 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -20,9 +22,10 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <mach/ep93xx-regs.h>
-#include <mach/platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +188,47 @@ struct ep93xx_priv
#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
{
@@ -217,14 +260,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
rstat->rstat1 = 0;
if (!(rstat0 & RSTAT0_EOF))
- printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
- printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
- printk(KERN_CRIT "ep93xx_rx: entry mismatch "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
ep->stats.rx_errors++;
@@ -241,8 +281,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
length = rstat1 & RSTAT1_FRAME_LENGTH;
if (length > MAX_PKT_SIZE) {
- printk(KERN_NOTICE "ep93xx_rx: invalid length "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
goto err;
}
@@ -371,11 +410,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
tstat->tstat0 = 0;
if (tstat0 & TSTAT0_FA)
- printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
- " %.8x\n", tstat0);
+ pr_crit("frame aborted %.8x\n", tstat0);
if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
- printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
- " %.8x\n", tstat0);
+ pr_crit("entry mismatch %.8x\n", tstat0);
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +573,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
return 1;
}
@@ -581,7 +618,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
+ pr_crit("hw failed to start\n");
return 1;
}
@@ -617,7 +654,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
}
if (i == 10)
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
}
static int ep93xx_open(struct net_device *dev)
@@ -681,48 +718,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int data;
- int i;
-
- wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10) {
- printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
- data = 0xffff;
- } else {
- data = rdl(ep, REG_MIIDATA);
- }
-
- return data;
-}
-
-static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int i;
-
- wrl(ep, REG_MIIDATA, data);
- wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10)
- printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
-}
-
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +820,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
int err;
if (pdev == NULL)
return -ENODEV;
data = pdev->dev.platform_data;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
dev = ep93xx_dev_alloc(data);
if (dev == NULL) {
err = -ENOMEM;
@@ -842,23 +844,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- ep->res = request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
- dev_name(&pdev->dev));
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- ep->base_addr = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start);
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
if (ep->base_addr == NULL) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_out;
}
- ep->irq = pdev->resource[1].start;
+ ep->irq = irq;
ep->mii.phy_id = data->phy_id;
ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +877,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
- ep->irq, data->dev_addr[0], data->dev_addr[1],
- data->dev_addr[2], data->dev_addr[3],
- data->dev_addr[4], data->dev_addr[5]);
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
return 0;
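
Besides the pr_fmt()/pr_crit() cleanups, the ep93xx probe now obtains its resources through the platform helpers instead of indexing pdev->resource[] directly, and sizes the region with resource_size(). A reduced sketch of that probe sequence under the same assumptions as the hunk above (net_device setup and full error unwinding omitted; foo_eth_probe is a hypothetical name):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int foo_eth_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq < 0)
		return -ENXIO;

	if (!request_mem_region(mem->start, resource_size(mem),
				dev_name(&pdev->dev)))
		return -ENOMEM;

	base = ioremap(mem->start, resource_size(mem));
	if (!base) {
		release_mem_region(mem->start, resource_size(mem));
		return -EIO;
	}

	/* ...allocate the net_device, stash base and irq, register it... */
	return 0;
}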
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1f7a69c929a6..d9de9bce2395 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -463,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index f52f668c49bf..4af235d41fda 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -33,7 +33,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index c3dfbdd2cdcf..6be8b098b8b4 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
@@ -735,22 +736,25 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist = dev->mc_list;
+ struct dev_mc_list *mclist;
u8 diffs[ETH_ALEN], *addr;
- int cnt = dev->mc_count, i;
+ int i;
- if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
+ if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
&port->regs->rx_control[0]);
return;
}
memset(diffs, 0, ETH_ALEN);
- addr = mclist->dmi_addr; /* first MAC address */
- while (--cnt && (mclist = mclist->next))
+ addr = NULL;
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (!addr)
+ addr = mclist->dmi_addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ }
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(addr[i], &port->regs->mcast_addr[i]);
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index be256b34cea8..84f8a8f73802 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <asm/irq.h>
@@ -327,25 +328,24 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
*/
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
- struct dev_mc_list *addr,
- int nr_addr)
+ struct net_device *ndev)
{
u32 low, high;
int i;
+ struct dev_mc_list *dmi;
- for (i = 0; i < nr_addr; i++, addr = addr->next) {
- /* Ran out of addresses? */
- if (!addr)
- break;
+ i = 0;
+ netdev_for_each_mc_addr(dmi, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
- (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
- high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);
+ low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
+ (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
+ high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
+ i++;
}
/* Clear the remaining Additional Station Addresses */
@@ -450,11 +450,10 @@ ks8695_rx_irq(int irq, void *dev_id)
}
/**
- * ks8695_rx - Receive packets called by NAPI poll method
+ * ks8695_rx - Receive packets called by NAPI poll method
* @ksp: Private data for the KS8695 Ethernet
- * @budget: The max packets would be receive
+ * @budget: Number of packets allowed to process
*/
-
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
struct net_device *ndev = ksp->ndev;
@@ -462,7 +461,6 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
int buff_n;
u32 flags;
int pktlen;
- int last_rx_processed = -1;
int received = 0;
buff_n = ksp->next_rx_desc_read;
@@ -472,6 +470,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
cpu_to_le32(RDES_OWN)))) {
rmb();
flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
+
/* Found an SKB which we own, this means we
* received a packet
*/
@@ -534,23 +533,18 @@ rx_failure:
ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
received++;
- /* And note this as processed so we can start
- * from here next time
- */
- last_rx_processed = buff_n;
buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
- /*And note which RX descriptor we last did */
- if (likely(last_rx_processed != -1))
- ksp->next_rx_desc_read =
- (last_rx_processed + 1) &
- MAX_RX_DESC_MASK;
}
+
+ /* And note which RX descriptor we last did */
+ ksp->next_rx_desc_read = buff_n;
+
/* And refill the buffers */
ks8695_refill_rxbuffers(ksp);
- /* Kick the RX DMA engine, in case it became
- * suspended */
+ /* Kick the RX DMA engine, in case it became suspended */
ks8695_writereg(ksp, KS8695_DRSC, 0);
+
return received;
}
@@ -576,9 +570,9 @@ static int ks8695_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
unsigned long flags;
spin_lock_irqsave(&ksp->rx_lock, flags);
+ __napi_complete(napi);
/*enable rx interrupt*/
writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
- __napi_complete(napi);
spin_unlock_irqrestore(&ksp->rx_lock, flags);
}
return work_done;
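
The reordering above follows the usual NAPI completion rule: mark the poll complete before re-enabling the device interrupt, so an interrupt arriving in that window can napi_schedule() a fresh poll rather than being ignored while NAPI_STATE_SCHED is still set. A generic sketch with hypothetical foo_* placeholders for the driver's RX handler and interrupt unmasking:

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	spinlock_t rx_lock;
};

/* Placeholders for the driver's real RX processing and IRQ unmask. */
static int foo_rx(struct foo_priv *fp, int budget) { return 0; }
static void foo_enable_rx_irq(struct foo_priv *fp) { }

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(fp, budget);

	if (work_done < budget) {
		unsigned long flags;

		/* complete first, then unmask: a new IRQ can reschedule */
		spin_lock_irqsave(&fp->rx_lock, flags);
		__napi_complete(napi);
		foo_enable_rx_irq(fp);
		spin_unlock_irqrestore(&fp->rx_lock, flags);
	}
	return work_done;
}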
@@ -1207,7 +1201,7 @@ ks8695_set_multicast(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
/* enable all multicast mode */
ctrl |= DRXC_RM;
- } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
/* more specific multicast addresses than can be
* handled in hardware
*/
@@ -1215,8 +1209,7 @@ ks8695_set_multicast(struct net_device *ndev)
} else {
/* enable specific multicasts */
ctrl &= ~DRXC_RM;
- ks8695_init_partial_multicast(ksp, ndev->mc_list,
- ndev->mc_count);
+ ks8695_init_partial_multicast(ksp, ndev);
}
ks8695_writereg(ksp, KS8695_DRXC, ctrl);
@@ -1335,7 +1328,6 @@ ks8695_stop(struct net_device *ndev)
netif_stop_queue(ndev);
napi_disable(&ksp->napi);
- netif_carrier_off(ndev);
ks8695_shutdown(ksp);
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index b7f3866d546f..f7c9ca1dfb17 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -18,6 +18,7 @@
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/gfp.h>
#define DRV_MODULE_NAME "w90p910-emc"
#define DRV_MODULE_VERSION "0.1"
@@ -858,10 +859,10 @@ static void w90p910_ether_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
- rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else
- rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
+ rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else
+ rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
}
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b14f4799d5d1..10a20fb9ae65 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -47,7 +47,6 @@
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -839,21 +838,19 @@ set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index c5721cb38265..a8686bfec7a1 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -53,7 +53,6 @@ static char version[] = "atarilance.c: v1.3 04/04/96 "
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -663,7 +662,7 @@ static int lance_open( struct net_device *dev )
while (--i > 0)
if (DREG & CSR0_IDON)
break;
- if (i < 0 || (DREG & CSR0_ERR)) {
+ if (i <= 0 || (DREG & CSR0_ERR)) {
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
@@ -1097,7 +1096,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index efe5435bc3d3..84ae905bf732 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -313,6 +313,9 @@ enum atl1c_rss_type {
enum atl1c_nic_type {
athr_l1c = 0,
athr_l2c = 1,
+ athr_l2c_b,
+ athr_l2c_b2,
+ athr_l1d,
};
enum atl1c_trans_queue {
@@ -426,8 +429,12 @@ struct atl1c_hw {
#define ATL1C_ASPM_L1_SUPPORT 0x0100
#define ATL1C_ASPM_CTRL_MON 0x0200
#define ATL1C_HIB_DISABLE 0x0400
-#define ATL1C_LINK_CAP_1000M 0x0800
-#define ATL1C_FPGA_VERSION 0x8000
+#define ATL1C_APS_MODE_ENABLE 0x0800
+#define ATL1C_LINK_EXT_SYNC 0x1000
+#define ATL1C_CLK_GATING_EN 0x2000
+#define ATL1C_FPGA_VERSION 0x8000
+ u16 link_cap_flags;
+#define ATL1C_LINK_CAP_1000M 0x0001
u16 cmb_tpd;
u16 cmb_rrd;
u16 cmb_rx_timer; /* 2us resolution */
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 9b1e0eaebb5c..32339243d61f 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -22,6 +22,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/slab.h>
#include "atl1c.h"
@@ -37,7 +38,7 @@ static int atl1c_get_settings(struct net_device *netdev,
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M)
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 3e69b940b8f7..f1389d664a21 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -70,17 +70,39 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
u32 otp_ctrl_data;
u32 twsi_ctrl_data;
u8 eth_addr[ETH_ALEN];
+ u16 phy_data;
+ bool raise_vol = false;
/* init */
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- /* Enable OTP CLK */
- if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
- otp_ctrl_data |= OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ /* Enable OTP CLK */
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
+ otp_ctrl_data |= OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFF7F;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x8;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ raise_vol = true;
}
AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
@@ -96,11 +118,31 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return -1;
}
/* Disable OTP_CLK */
- if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
- otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
+ if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
+ otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+ if (raise_vol) {
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x80;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFFF7;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ }
}
/* maybe MAC-address is from BIOS */
@@ -114,6 +156,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return 0;
}
+out:
return -1;
}
@@ -307,7 +350,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) {
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) {
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
mii_giga_ctrl_data |= ADVERTISE_1000HALF;
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
@@ -389,6 +432,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
int err;
@@ -404,6 +448,21 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
AT_WRITE_FLUSH(hw);
msleep(10);
+ if (hw->nic_type == athr_l2c_b) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
+ msleep(20);
+ }
+
/*Enable PHY LinkChange Interrupt */
err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
if (err) {
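
The atl1c hunks above repeat the same sequence several times: select a vendor debug register through MII_DBG_ADDR, then read-modify-write it through MII_DBG_DATA. A hypothetical helper (not part of the patch) that captures the idiom using only symbols already visible in this file:

static int atl1c_dbg_phy_update(struct atl1c_hw *hw, u16 dbg_reg,
				u16 clear, u16 set)
{
	u16 val;
	int err;

	err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, dbg_reg);
	if (err)
		return err;
	err = atl1c_read_phy_reg(hw, MII_DBG_DATA, &val);
	if (err)
		return err;
	val = (val & ~clear) | set;
	return atl1c_write_phy_reg(hw, MII_DBG_DATA, val);
}

With such a helper the raise_vol tweak above would read atl1c_dbg_phy_update(hw, 0x3B, 0, 0x8), followed by the usual udelay(20).
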
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index c2c738df5c63..1eeb3ed9f0cb 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -57,6 +57,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define REG_LINK_CTRL 0x68
#define LINK_CTRL_L0S_EN 0x01
#define LINK_CTRL_L1_EN 0x02
+#define LINK_CTRL_EXT_SYNC 0x80
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xff
@@ -156,6 +157,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
#define PM_CTRL_LCKDET_TIMER_SHIFT 24
+#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
+#define PM_CTRL_SA_DLY_EN 0x20000000
#define PM_CTRL_MAC_ASPM_CHK 0x40000000
#define PM_CTRL_HOTRST 0x80000000
@@ -314,6 +317,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define MAC_CTRL_BC_EN 0x4000000
#define MAC_CTRL_DBG 0x8000000
#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
+#define MAC_CTRL_HASH_ALG_CRC32 0x20000000
+#define MAC_CTRL_SPEED_MODE_SW 0x40000000
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0b..50dc531a02d8 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,11 +21,18 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.0.1-NAPI"
+#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
+
+#define L2CB_V10 0xc0
+#define L2CB_V11 0xc1
+
/*
* atl1c_pci_tbl - PCI Device ID Table
*
@@ -35,9 +42,12 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1c_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
/* required last entry */
{ 0 }
};
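
DEFINE_PCI_DEVICE_TABLE() is a wrapper that, in kernels of this vintage, expands to roughly "const struct pci_device_id name[] __devinitconst", so the ID table becomes const and lands in the init-discardable section. A hedged sketch of how such a table is declared and exported ("example" names are illustrative; 0x1969/0x1063 is the Attansic/L1C pair listed above):

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(0x1969, 0x1063) },	/* vendor, device */
	{ 0 }				/* required last entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
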
@@ -367,7 +377,7 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl1c_hash_set(hw, hash_value);
}
@@ -593,11 +603,18 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
case PCI_DEVICE_ID_ATTANSIC_L2C:
hw->nic_type = athr_l2c;
break;
-
case PCI_DEVICE_ID_ATTANSIC_L1C:
hw->nic_type = athr_l1c;
break;
-
+ case PCI_DEVICE_ID_ATHEROS_L2C_B:
+ hw->nic_type = athr_l2c_b;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L2C_B2:
+ hw->nic_type = athr_l2c_b2;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L1D:
+ hw->nic_type = athr_l1d;
+ break;
default:
break;
}
@@ -620,10 +637,13 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
if (link_ctrl_data & LINK_CTRL_L1_EN)
hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
+ if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
+ hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
- if (hw->nic_type == athr_l1c) {
+ if (hw->nic_type == athr_l1c ||
+ hw->nic_type == athr_l1d) {
hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
- hw->ctrl_flags |= ATL1C_LINK_CAP_1000M;
+ hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
}
return 0;
}
@@ -1234,21 +1254,92 @@ static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
{
u32 pm_ctrl_data;
+ u32 link_ctrl_data;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-
+ AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
+ PM_CTRL_LCKDET_TIMER_SHIFT);
pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data |= PM_CTRL_RBER_EN;
+ pm_ctrl_data |= PM_CTRL_SDES_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
+ if (hw->nic_type == athr_l2c_b &&
+ hw->revision_id == L2CB_V10)
+ link_ctrl_data |= LINK_CTRL_EXT_SYNC;
+ }
+
+ AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
+
+ pm_ctrl_data |= PM_CTRL_PCIE_RECV;
+ pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
+ pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+ pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
+ pm_ctrl_data &= ~PM_CTRL_HOTRST;
+ pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
+ }
if (linkup) {
- pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
- pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l2c_b)
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
+ pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+ if (hw->adapter->link_speed == SPEED_100 ||
+ hw->adapter->link_speed == SPEED_1000) {
+ pm_ctrl_data &=
+ ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ if (hw->nic_type == athr_l1d)
+ pm_ctrl_data |= 0xF <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ else
+ pm_ctrl_data |= 7 <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ }
+ } else {
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ }
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ if (hw->adapter->link_speed == SPEED_10)
+ if (hw->nic_type == athr_l1d)
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+ else if (hw->adapter->link_speed == SPEED_100)
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
- pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
- pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
} else {
pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
@@ -1302,6 +1393,10 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
+ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+ mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
+ mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
+ }
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
}
@@ -2596,11 +2691,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
- dev_dbg(&pdev->dev,
- "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
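
The probe hunk just above relies on the %pM printk extension, which takes a pointer to a six-byte MAC address and prints it colon-separated, replacing the six-way %02x formatting. A one-line sketch (names illustrative):

#include <linux/device.h>

static void example_print_mac(struct device *dev, const u8 *mac)
{
	dev_dbg(dev, "mac address : %pM\n", mac);	/* e.g. 00:1b:21:...:5e */
}
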
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index a76006c1bc6b..ffd696ee7c8e 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -22,6 +22,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/slab.h>
#include "atl1e.h"
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 4a7700620119..76cc043def8c 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -394,7 +394,6 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
int atl1e_phy_commit(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 phy_data;
@@ -415,12 +414,12 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
- dev_err(&pdev->dev,
- "pcie linkdown at least for 25ms\n");
+ netdev_err(adapter->netdev,
+ "pcie linkdown at least for 25ms\n");
return ret_val;
}
- dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
+ netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
}
return 0;
}
@@ -428,7 +427,6 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int atl1e_phy_init(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
s32 ret_val;
u16 phy_val;
@@ -492,20 +490,22 @@ int atl1e_phy_init(struct atl1e_hw *hw)
/*Enable PHY LinkChange Interrupt */
ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
if (ret_val) {
- dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
+ netdev_err(adapter->netdev,
+ "Error enable PHY linkChange Interrupt\n");
return ret_val;
}
/* setup AutoNeg parameters */
ret_val = atl1e_phy_setup_autoneg_adv(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
+ netdev_err(adapter->netdev,
+ "Error Setting up Auto-Negotiation\n");
return ret_val;
}
/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
- dev_dbg(&pdev->dev, "Restarting Auto-Neg");
+ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
ret_val = atl1e_phy_commit(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Resetting the phy");
+ netdev_err(adapter->netdev, "Error resetting the phy\n");
return ret_val;
}
@@ -559,9 +559,8 @@ int atl1e_reset_hw(struct atl1e_hw *hw)
}
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
- dev_err(&pdev->dev,
- "MAC state machine cann't be idle since"
- " disabled for 10ms second\n");
+ netdev_err(adapter->netdev,
+ "MAC state machine can't be idle since disabled for 10ms second\n");
return AT_ERR_TIMEOUT;
}
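
The atl1e hunks switch the driver's logging from dev_err(&pdev->dev, ...) to the netdev_err()/netdev_warn()/netdev_info()/netdev_dbg() wrappers, which prefix messages with the driver and interface name and remove the need to keep a pci_dev pointer around purely for printing. A minimal sketch (helper name illustrative):

#include <linux/netdevice.h>

static void example_report_link(struct net_device *netdev, int speed, bool full)
{
	/* message is automatically prefixed with the driver and interface name */
	netdev_info(netdev, "NIC Link is Up <%d Mbps %s Duplex>\n",
		    speed, full ? "Full" : "Half");
}
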
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9b..73302ae468aa 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
@@ -164,11 +164,10 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
u16 speed, duplex, phy_data;
- /* MII_BMSR must read twise */
+ /* MII_BMSR must read twice */
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
if ((phy_data & BMSR_LSTATUS) == 0) {
@@ -195,12 +194,11 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1e_setup_mac_ctrl(adapter);
- dev_info(&pdev->dev,
- "%s: %s NIC Link is Up<%d Mbps %s>\n",
- atl1e_driver_name, netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex");
+ netdev_info(netdev,
+ "NIC Link is Up <%d Mbps %s Duplex>\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half");
}
if (!netif_carrier_ok(netdev)) {
@@ -230,7 +228,6 @@ static void atl1e_link_chg_task(struct work_struct *work)
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u16 phy_data = 0;
u16 link_up = 0;
@@ -243,8 +240,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
if (!link_up) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
- dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
- atl1e_driver_name, netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
adapter->link_speed = SPEED_0;
netif_stop_queue(netdev);
}
@@ -311,7 +307,7 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl1e_hash_set(hw, hash_value);
}
@@ -321,10 +317,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
- dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_irq_disable(adapter);
@@ -345,9 +340,7 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
-
- dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
/*
@@ -391,7 +384,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ netdev_warn(adapter->netdev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
@@ -438,7 +431,6 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int retval = 0;
@@ -466,8 +458,8 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
goto out;
}
- dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
- data->reg_num, data->val_in);
+ netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
+ data->reg_num, data->val_in);
if (atl1e_write_phy_reg(&adapter->hw,
data->reg_num, data->val_in)) {
retval = -EIO;
@@ -602,7 +594,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->dmaw_dly_cnt = 4;
if (atl1e_alloc_queues(adapter)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -800,8 +792,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_size, &adapter->ring_dma);
if (adapter->ring_vir_addr == NULL) {
- dev_err(&pdev->dev, "pci_alloc_consistent failed, "
- "size = D%d", size);
+ netdev_err(adapter->netdev,
+ "pci_alloc_consistent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -817,7 +809,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- dev_err(&pdev->dev, "kzalloc failed , size = D%d", size);
+ netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
+ size);
err = -ENOMEM;
goto failed;
}
@@ -852,8 +845,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
}
if (unlikely(offset > adapter->ring_size)) {
- dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n",
- offset, adapter->ring_size);
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
err = -1;
goto failed;
}
@@ -1077,7 +1070,6 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
static int atl1e_configure(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 intr_status_data = 0;
@@ -1130,8 +1122,8 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
intr_status_data = AT_READ_REG(hw, REG_ISR);
if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
- dev_err(&pdev->dev, "atl1e_configure failed,"
- "PCIE phy link down\n");
+ netdev_err(adapter->netdev,
+ "atl1e_configure failed, PCIE phy link down\n");
return -1;
}
@@ -1262,7 +1254,6 @@ static irqreturn_t atl1e_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct atl1e_hw *hw = &adapter->hw;
int max_ints = AT_MAX_INT_WORK;
int handled = IRQ_NONE;
@@ -1285,8 +1276,8 @@ static irqreturn_t atl1e_intr(int irq, void *data)
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
- dev_err(&pdev->dev,
- "pcie phy linkdown %x\n", status);
+ netdev_err(adapter->netdev,
+ "pcie phy linkdown %x\n", status);
if (netif_running(adapter->netdev)) {
/* reset MAC */
atl1e_irq_reset(adapter);
@@ -1297,9 +1288,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
/* check if DMA read/write error */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- dev_err(&pdev->dev,
- "PCIE DMA RW error (status = 0x%x)\n",
- status);
+ netdev_err(adapter->netdev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
atl1e_irq_reset(adapter);
schedule_work(&adapter->reset_task);
break;
@@ -1382,7 +1373,6 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
- struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
&adapter->rx_ring;
@@ -1404,11 +1394,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page->read_offset);
/* check sequence number */
if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
- dev_err(&pdev->dev,
- "rx sequence number"
- " error (rx=%d) (expect=%d)\n",
- prrs->seq_num,
- rx_page_desc[que].rx_nxseq);
+ netdev_err(netdev,
+ "rx sequence number error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
rx_page_desc[que].rx_nxseq++;
/* just for debug use */
AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
@@ -1424,9 +1413,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
/* hardware error, discard this packet*/
- dev_err(&pdev->dev,
- "rx packet desc error %x\n",
- *((u32 *)prrs + 1));
+ netdev_err(netdev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
goto skip_pkt;
}
}
@@ -1435,8 +1424,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL) {
- dev_warn(&pdev->dev, "%s: Memory squeeze,"
- "deferring packet.\n", netdev->name);
+ netdev_warn(netdev,
+ "Memory squeeze, deferring packet\n");
goto skip_pkt;
}
skb->dev = netdev;
@@ -1450,9 +1439,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
u16 vlan_tag = (prrs->vtag >> 4) |
((prrs->vtag & 7) << 13) |
((prrs->vtag & 8) << 9);
- dev_dbg(&pdev->dev,
- "RXD VLAN TAG<RRD>=0x%04x\n",
- prrs->vtag);
+ netdev_dbg(netdev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
vlan_tag);
} else {
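
The receive path above allocates with netdev_alloc_skb_ip_align(), which reserves NET_IP_ALIGN bytes of headroom so the IP header ends up naturally aligned after the 14-byte Ethernet header. A sketch of the copy-based rx shape, where "buf" and "len" stand in for the driver's DMA'd frame:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static int example_rx_one(struct net_device *netdev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);

	if (!skb)
		return -ENOMEM;		/* caller drops the packet */
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, netdev);
	netif_receive_skb(skb);
	return 0;
}
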
@@ -1500,7 +1489,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
{
struct atl1e_adapter *adapter =
container_of(napi, struct atl1e_adapter, napi);
- struct pci_dev *pdev = adapter->pdev;
u32 imr_data;
int work_done = 0;
@@ -1519,8 +1507,8 @@ quit_polling:
/* test debug */
if (test_bit(__AT_DOWN, &adapter->flags)) {
atomic_dec(&adapter->irq_sem);
- dev_err(&pdev->dev,
- "atl1e_clean is called when AT_DOWN\n");
+ netdev_err(adapter->netdev,
+ "atl1e_clean is called when AT_DOWN\n");
}
/* reenable RX intr */
/*atl1e_irq_enable(adapter); */
@@ -1618,7 +1606,6 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
- struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
@@ -1642,8 +1629,8 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
- dev_warn(&pdev->dev,
- "IPV4 tso with zero data??\n");
+ netdev_warn(adapter->netdev,
+ "IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
@@ -1672,8 +1659,8 @@ check_sum:
cso = skb_transport_offset(skb);
if (unlikely(cso & 0x1)) {
- dev_err(&adapter->pdev->dev,
- "pay load offset should not ant event number\n");
+ netdev_err(adapter->netdev,
+ "payload offset should not ant event number\n");
return -1;
} else {
css = cso + skb->csum_offset;
@@ -1886,8 +1873,8 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
@@ -1898,13 +1885,13 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
netdev->name, netdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
- dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
return err;
}
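
The request_irq hunk keeps the usual "try MSI, fall back to the shared legacy line" shape; only the log calls change. A hedged sketch of that shape with stand-in names:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int example_request_irq(struct pci_dev *pdev, struct net_device *netdev,
			       irq_handler_t handler, bool *have_msi)
{
	int err;

	*have_msi = !pci_enable_msi(pdev);	/* silently fall back on failure */
	err = request_irq(pdev->irq, handler,
			  *have_msi ? 0 : IRQF_SHARED,
			  netdev->name, netdev);
	if (err && *have_msi) {
		pci_disable_msi(pdev);
		*have_msi = false;
	}
	return err;
}
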
@@ -2078,7 +2065,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
- dev_dbg(&pdev->dev, "set phy register failed\n");
+ netdev_dbg(adapter->netdev, "set phy register failed\n");
goto wol_dis;
}
@@ -2100,17 +2087,14 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
}
if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
- dev_dbg(&pdev->dev,
- "%s: Link may change"
- "when suspend\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "Link may change when suspend\n");
}
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only link up can wake up */
if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
- dev_dbg(&pdev->dev, "%s: read write phy "
- "register failed.\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "read write phy register failed\n");
goto wol_dis;
}
}
@@ -2131,9 +2115,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
- dev_dbg(&pdev->dev,
- "%s: suspend MAC=0x%x\n",
- atl1e_driver_name, mac_ctrl_data);
+ netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+ mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
@@ -2183,8 +2166,8 @@ static int atl1e_resume(struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
- " device from suspend\n");
+ netdev_err(adapter->netdev,
+ "Cannot enable PCI device from suspend\n");
return err;
}
@@ -2315,7 +2298,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
err = atl1e_init_netdev(netdev, pdev);
if (err) {
- dev_err(&pdev->dev, "init netdevice failed\n");
+ netdev_err(netdev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
@@ -2326,7 +2309,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
if (!adapter->hw.hw_addr) {
err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
+ netdev_err(netdev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
@@ -2356,7 +2339,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
/* setup the private structure */
err = atl1e_sw_init(adapter);
if (err) {
- dev_err(&pdev->dev, "net device private data init failed\n");
+ netdev_err(netdev, "net device private data init failed\n");
goto err_sw_init;
}
@@ -2372,22 +2355,19 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
if (atl1e_read_mac_addr(&adapter->hw) != 0) {
err = -EIO;
- dev_err(&pdev->dev, "get mac address failed\n");
+ netdev_err(netdev, "get mac address failed\n");
goto err_eeprom;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "register netdevice failed\n");
+ netdev_err(netdev, "register netdevice failed\n");
goto err_register;
}
@@ -2488,8 +2468,8 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev,
- "ATL1e: Cannot re-enable PCI device after reset.\n");
+ netdev_err(adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -2517,8 +2497,8 @@ static void atl1e_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (atl1e_up(adapter)) {
- dev_err(&pdev->dev,
- "ATL1e: can't bring device back up after reset\n");
+ netdev_err(adapter->netdev,
+ "can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
index b3be59fd3fb5..0ce60b6e7ef0 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -116,7 +116,7 @@ struct atl1e_option {
} arg;
};
-static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
+static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -127,16 +127,19 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- dev_info(&pdev->dev, "%s Enabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- dev_info(&pdev->dev, "%s Disabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
+ netdev_info(adapter->netdev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -148,8 +151,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- dev_info(&pdev->dev, "%s\n",
- ent->str);
+ netdev_info(adapter->netdev,
+ "%s\n", ent->str);
return 0;
}
}
@@ -159,8 +162,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
BUG();
}
- dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -176,11 +179,13 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
+
if (bd >= ATL1E_MAX_NIC) {
- dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
- dev_notice(&pdev->dev, "Using defaults for all values\n");
+ netdev_notice(adapter->netdev,
+ "no configuration for board #%i\n", bd);
+ netdev_notice(adapter->netdev,
+ "Using defaults for all values\n");
}
{ /* Transmit Ring Size */
@@ -196,7 +201,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
@@ -215,7 +220,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
@@ -235,7 +240,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
@@ -254,7 +259,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_media_type > bd) {
val = media_type[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127c..0ebd8208f606 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
- Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATLX_DRIVER_VERSION);
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
/*
* atl1_pci_tbl - PCI Device ID Table
*/
-static const struct pci_device_id atl1_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
/* required last entry */
{0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c0451d75cdcf..54662f24f9bb 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -39,6 +39,7 @@
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
@@ -63,7 +64,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
-static struct pci_device_id atl2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
@@ -157,7 +158,7 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl2_hash_set(hw, hash_value);
}
@@ -1959,12 +1960,15 @@ static int atl2_get_eeprom(struct net_device *netdev,
return -ENOMEM;
for (i = first_dword; i < last_dword; i++) {
- if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
- return -EIO;
+ if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+ ret_val = -EIO;
+ goto free;
+ }
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
+free:
kfree(eeprom_buff);
return ret_val;
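
The atl2_get_eeprom hunk above also fixes a leak: a failed atl2_read_eeprom() used to return straight out of the function, abandoning eeprom_buff; the new code jumps to a label that frees the buffer first. The general unwind idiom, as a sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_read_all(u32 *dst, int ndwords,
			    bool (*read_one)(int idx, u32 *val))
{
	int i, ret = 0;
	u32 *buf = kmalloc(ndwords * sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	for (i = 0; i < ndwords; i++) {
		if (!read_one(i, &buf[i])) {
			ret = -EIO;
			goto free;	/* was: return -EIO, leaking buf */
		}
	}
	memcpy(dst, buf, ndwords * sizeof(*buf));
free:
	kfree(buf);
	return ret;
}
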
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index d918bbe621ea..927e4de6474d 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -442,7 +442,7 @@ struct atl2_hw {
struct atl2_ring_header {
/* pointer to the descriptor ring memory */
void *desc;
- /* physical adress of the descriptor ring */
+ /* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 3dc014215679..72f3306352e2 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -144,7 +144,7 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value, and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
atlx_hash_set(hw, hash_value);
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2f8261c9614a..55039d44dc47 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -129,7 +129,6 @@ static int xcvr[NUM_UNITS]; /* The data transfer mode. */
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -861,7 +860,7 @@ static void set_rx_mode_8002(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
else
lp->addr_mode = CMR2h_Normal;
@@ -877,7 +876,8 @@ static void set_rx_mode_8012(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
new_mode = CMR2h_PROMISC;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
@@ -885,9 +885,7 @@ static void set_rx_mode_8012(struct net_device *dev)
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next)
- {
+ netdev_for_each_mc_addr(mclist, dev) {
int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a88..4da191b87b0d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -55,6 +55,7 @@
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
@@ -63,6 +64,7 @@
#include <asm/processor.h>
#include <au1000.h>
+#include <au1xxx_eth.h>
#include <prom.h>
#include "au1000_eth.h"
@@ -112,15 +114,15 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
*
* PHY detection algorithm
*
- * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is
+ * If phy_static_config is undefined, the PHY setup is
* autodetected:
*
* mii_probe() first searches the current MAC's MII bus for a PHY,
- * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is
+ * selecting the first (or last, if phy_search_highest_addr is
* defined) PHY address not already claimed by another netdev.
*
* If nothing was found that way when searching for the 2nd ethernet
- * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then
+ * controller's PHY and phy1_search_mac0 is defined, then
* the first MII bus is searched as well for an unclaimed PHY; this is
* needed in case of a dual-PHY accessible only through the MAC0's MII
* bus.
@@ -129,9 +131,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* controller is not registered to the network subsystem.
*/
-/* autodetection defaults */
-#undef AU1XXX_PHY_SEARCH_HIGHEST_ADDR
-#define AU1XXX_PHY1_SEARCH_ON_MAC0
+/* autodetection defaults: phy1_search_mac0 */
/* static PHY setup
*
@@ -148,29 +148,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-#if defined(CONFIG_MIPS_BOSPORUS)
-/*
- * Micrel/Kendin 5 port switch attached to MAC0,
- * MAC0 is associated with PHY address 5 (== WAN port)
- * MAC1 is not associated with any PHY, since it's connected directly
- * to the switch.
- * no interrupts are used
- */
-# define AU1XXX_PHY_STATIC_CONFIG
-
-# define AU1XXX_PHY0_ADDR 5
-# define AU1XXX_PHY0_BUSID 0
-# undef AU1XXX_PHY0_IRQ
-
-# undef AU1XXX_PHY1_ADDR
-# undef AU1XXX_PHY1_BUSID
-# undef AU1XXX_PHY1_IRQ
-#endif
-
-#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
-# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
-#endif
-
static void enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
@@ -390,67 +367,55 @@ static int mii_probe (struct net_device *dev)
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
-#if defined(AU1XXX_PHY_STATIC_CONFIG)
- BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+ if (aup->phy_static_config) {
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
- if(aup->mac_id == 0) { /* get PHY0 */
-# if defined(AU1XXX_PHY0_ADDR)
- phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus->phy_map[AU1XXX_PHY0_ADDR];
-# else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
- return 0;
-# endif /* defined(AU1XXX_PHY0_ADDR) */
- } else if (aup->mac_id == 1) { /* get PHY1 */
-# if defined(AU1XXX_PHY1_ADDR)
- phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus->phy_map[AU1XXX_PHY1_ADDR];
-# else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ if (aup->phy_addr)
+ phydev = aup->mii_bus->phy_map[aup->phy_addr];
+ else
+ printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
+ dev->name);
return 0;
-# endif /* defined(AU1XXX_PHY1_ADDR) */
- }
-
-#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */
- int phy_addr;
-
- /* find the first (lowest address) PHY on the current MAC's MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (aup->mii_bus->phy_map[phy_addr]) {
- phydev = aup->mii_bus->phy_map[phy_addr];
-# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
- break; /* break out with first one found */
-# endif
- }
+ } else {
+ int phy_addr;
+
+ /* find the first (lowest address) PHY on the current MAC's MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ break; /* break out with first one found */
+ }
-# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0)
- /* try harder to find a PHY */
- if (!phydev && (aup->mac_id == 1)) {
- /* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
- "let's see if it's attached to MAC0...\n");
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
- BUG_ON(!au_macs[0]);
+ /* find the first (lowest address) non-attached PHY on
+ * the MAC0 MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
- /* find the first (lowest address) non-attached PHY on
- * the MAC0 MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- struct phy_device *const tmp_phydev =
- au_macs[0]->mii_bus->phy_map[phy_addr];
+ if (aup->mac_id == 1)
+ break;
- if (!tmp_phydev)
- continue; /* no PHY here... */
+ if (!tmp_phydev)
+ continue; /* no PHY here... */
- if (tmp_phydev->attached_dev)
- continue; /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue; /* already claimed by MAC0 */
- phydev = tmp_phydev;
- break; /* found it */
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+ }
}
}
-# endif /* defined(AU1XXX_PHY1_SEARCH_OTHER_BUS) */
-#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
if (!phydev) {
printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
return -1;
@@ -578,31 +543,6 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
}
}
-static struct {
- u32 base_addr;
- u32 macen_addr;
- int irq;
- struct net_device *dev;
-} iflist[2] = {
-#ifdef CONFIG_SOC_AU1000
- {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
- {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1100
- {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1500
- {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
- {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1550
- {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
- {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
-#endif
-};
-
-static int num_ifs;
-
/*
* ethtool operations
*/
@@ -711,7 +651,6 @@ static int au1000_init(struct net_device *dev)
static inline void update_rx_stats(struct net_device *dev, u32 status)
{
- struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
ps->rx_packets++;
@@ -969,7 +908,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
}
pDB = aup->tx_db_inuse[aup->tx_head];
- skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
for (i=skb->len; i<ETH_ZLEN; i++) {
((char *)pDB->vaddr)[i] = 0;
@@ -1013,21 +952,18 @@ static void au1000_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
} else {
- int i;
struct dev_mc_list *mclist;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev)
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
(long *)mc_filter);
- }
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
aup->mac->control &= ~MAC_PROMISCUOUS;
@@ -1058,53 +994,59 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static struct net_device * au1000_probe(int port_num)
+static int __devinit au1000_probe(struct platform_device *pdev)
{
static unsigned version_printed = 0;
struct au1000_private *aup = NULL;
+ struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
db_dest_t *pDB, *pDBfree;
+ int irq, i, err = 0;
+ struct resource *base, *macen;
char ethaddr[6];
- int irq, i, err;
- u32 base, macen;
- if (port_num >= NUM_ETH_INTERFACES)
- return NULL;
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ err = -ENODEV;
+ goto out;
+ }
- base = CPHYSADDR(iflist[port_num].base_addr );
- macen = CPHYSADDR(iflist[port_num].macen_addr);
- irq = iflist[port_num].irq;
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!macen) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ err = -ENODEV;
+ goto out;
+ }
- if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
- !request_mem_region(macen, 4, "Au1x00 ENET"))
- return NULL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ err = -ENODEV;
+ goto out;
+ }
- if (version_printed++ == 0)
- printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+ if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
+ printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
+ printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ err = -ENXIO;
+ goto err_request;
+ }
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
- return NULL;
+ err = -ENOMEM;
+ goto err_alloc;
}
- dev->base_addr = base;
- dev->irq = irq;
- dev->netdev_ops = &au1000_netdev_ops;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
- dev->watchdog_timeo = ETH_TX_TIMEOUT;
-
- err = register_netdev(dev);
- if (err != 0) {
- printk(KERN_ERR "%s: Cannot register net device, error %d\n",
- DRV_NAME, err);
- free_netdev(dev);
- return NULL;
- }
-
- printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
- dev->name, base, irq);
-
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
@@ -1115,21 +1057,29 @@ static struct net_device * au1000_probe(int port_num)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
+ printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ err = -ENOMEM;
+ goto err_vaddr;
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+ aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
+ if (!aup->mac) {
+ printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ err = -ENXIO;
+ goto err_remap1;
+ }
- /* Setup some variables for quick register address access */
- aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
- aup->mac_id = port_num;
- au_macs[port_num] = aup;
+ /* Setup some variables for quick register address access */
+ aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
+ if (!aup->enable) {
+ printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ err = -ENXIO;
+ goto err_remap2;
+ }
+ aup->mac_id = pdev->id;
- if (port_num == 0) {
+ if (pdev->id == 0) {
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
@@ -1139,7 +1089,7 @@ static struct net_device * au1000_probe(int port_num)
}
setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- } else if (port_num == 1)
+ } else if (pdev->id == 1)
setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
@@ -1147,14 +1097,37 @@ static struct net_device * au1000_probe(int port_num)
* to match those that are printed on their stickers
*/
memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[5] += port_num;
+ dev->dev_addr[5] += pdev->id;
*aup->enable = 0;
aup->mac_enabled = 0;
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ aup->phy1_search_mac0 = 1;
+ } else {
+ aup->phy_static_config = pd->phy_static_config;
+ aup->phy_search_highest_addr = pd->phy_search_highest_addr;
+ aup->phy1_search_mac0 = pd->phy1_search_mac0;
+ aup->phy_addr = pd->phy_addr;
+ aup->phy_busid = pd->phy_busid;
+ aup->phy_irq = pd->phy_irq;
+ }
+
+ if (aup->phy_busid && aup->phy_busid > 0) {
+ printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+ "bus not supported yet\n");
+ err = -ENODEV;
+ goto err_mdiobus_alloc;
+ }
+
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL)
- goto err_out;
+ if (aup->mii_bus == NULL) {
+ printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
aup->mii_bus->priv = dev;
aup->mii_bus->read = au1000_mdiobus_read;
@@ -1168,23 +1141,19 @@ static struct net_device * au1000_probe(int port_num)
for(i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
-
/* if known, set corresponding PHY IRQs */
-#if defined(AU1XXX_PHY_STATIC_CONFIG)
-# if defined(AU1XXX_PHY0_IRQ)
- if (AU1XXX_PHY0_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
-# endif
-# if defined(AU1XXX_PHY1_IRQ)
- if (AU1XXX_PHY1_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
-# endif
-#endif
- mdiobus_register(aup->mii_bus);
+ if (aup->phy_static_config)
+ if (aup->phy_irq && aup->phy_busid == aup->mac_id)
+ aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
+
+ err = mdiobus_register(aup->mii_bus);
+ if (err) {
+ printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ goto err_mdiobus_reg;
+ }
- if (mii_probe(dev) != 0) {
+ if (mii_probe(dev) != 0)
goto err_out;
- }
pDBfree = NULL;
/* setup the data buffer descriptors and attach a buffer to each one */
@@ -1216,19 +1185,35 @@ static struct net_device * au1000_probe(int port_num)
aup->tx_db_inuse[i] = pDB;
}
+ dev->base_addr = base->start;
+ dev->irq = irq;
+ dev->netdev_ops = &au1000_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
/*
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
reset_mac(dev);
- return dev;
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto err_out;
+ }
+
+ printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ dev->name, (unsigned long)base->start, irq);
+ if (version_printed++ == 0)
+ printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ return 0;
err_out:
- if (aup->mii_bus != NULL) {
+ if (aup->mii_bus != NULL)
mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- }
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
@@ -1242,67 +1227,84 @@ err_out:
if (aup->tx_db_inuse[i])
ReleaseDB(aup, aup->tx_db_inuse[i]);
}
+err_mdiobus_reg:
+ mdiobus_free(aup->mii_bus);
+err_mdiobus_alloc:
+ iounmap(aup->enable);
+err_remap2:
+ iounmap(aup->mac);
+err_remap1:
dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr);
- unregister_netdev(dev);
+err_vaddr:
free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
+err_alloc:
+ release_mem_region(macen->start, resource_size(macen));
+err_request:
+ release_mem_region(base->start, resource_size(base));
+out:
+ return err;
}
-/*
- * Setup the base address and interrupt of the Au1xxx ethernet macs
- * based on cpu type and whether the interface is enabled in sys_pinfunc
- * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
- */
-static int __init au1000_init_module(void)
+static int __devexit au1000_remove(struct platform_device *pdev)
{
- int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
- struct net_device *dev;
- int i, found_one = 0;
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1000_private *aup = netdev_priv(dev);
+ int i;
+ struct resource *base, *macen;
- num_ifs = NUM_ETH_INTERFACES - ni;
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ if (aup->rx_db_inuse[i])
+ ReleaseDB(aup, aup->rx_db_inuse[i]);
+
+ for (i = 0; i < NUM_TX_DMA; i++)
+ if (aup->tx_db_inuse[i])
+ ReleaseDB(aup, aup->tx_db_inuse[i]);
+
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+
+ iounmap(aup->mac);
+ iounmap(aup->enable);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(base->start, resource_size(base));
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(macen->start, resource_size(macen));
+
+ free_netdev(dev);
- for(i = 0; i < num_ifs; i++) {
- dev = au1000_probe(i);
- iflist[i].dev = dev;
- if (dev)
- found_one++;
- }
- if (!found_one)
- return -ENODEV;
return 0;
}
-static void __exit au1000_cleanup_module(void)
+static struct platform_driver au1000_eth_driver = {
+ .probe = au1000_probe,
+ .remove = __devexit_p(au1000_remove),
+ .driver = {
+ .name = "au1000-eth",
+ .owner = THIS_MODULE,
+ },
+};
+MODULE_ALIAS("platform:au1000-eth");
+
+
+static int __init au1000_init_module(void)
+{
+ return platform_driver_register(&au1000_eth_driver);
+}
+
+static void __exit au1000_exit_module(void)
{
- int i, j;
- struct net_device *dev;
- struct au1000_private *aup;
-
- for (i = 0; i < num_ifs; i++) {
- dev = iflist[i].dev;
- if (dev) {
- aup = netdev_priv(dev);
- unregister_netdev(dev);
- mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- for (j = 0; j < NUM_RX_DMA; j++)
- if (aup->rx_db_inuse[j])
- ReleaseDB(aup, aup->rx_db_inuse[j]);
- for (j = 0; j < NUM_TX_DMA; j++)
- if (aup->tx_db_inuse[j])
- ReleaseDB(aup, aup->tx_db_inuse[j]);
- dma_free_noncoherent(NULL, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
- release_mem_region(dev->base_addr, MAC_IOSIZE);
- release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
- free_netdev(dev);
- }
- }
+ platform_driver_unregister(&au1000_eth_driver);
}
module_init(au1000_init_module);
-module_exit(au1000_cleanup_module);
+module_exit(au1000_exit_module);
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index 824ecd5ff3a8..f9d29a29b8fd 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -108,6 +108,15 @@ struct au1000_private {
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
+ /* PHY configuration */
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+
/* These variables are just for quick access to certain regs addresses. */
volatile mac_reg_t *mac; /* mac registers */
volatile u32 *enable; /* address of MAC Enable Register */
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..b718dc60afc4 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -25,6 +25,7 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/slab.h>
#include <net/ax88796.h>
@@ -921,7 +922,7 @@ static int ax_probe(struct platform_device *pdev)
size = (res->end - res->start) + 1;
ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (ax->mem2 == NULL) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb69586..69d9f3d368ae 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -10,6 +10,8 @@
* Distribute under GPL.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -25,6 +27,7 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -34,7 +37,6 @@
#include "b44.h"
#define DRV_MODULE_NAME "b44"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "2.0"
#define B44_DEF_MSG_ENABLE \
@@ -102,7 +104,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
-static const struct pci_device_id b44_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
@@ -189,11 +191,10 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
udelay(10);
}
if (i == timeout) {
- printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
- "%lx to %s.\n",
- bp->dev->name,
- bit, reg,
- (clear ? "clear" : "set"));
+ if (net_ratelimit())
+ netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
+ bit, reg, clear ? "clear" : "set");
+
return -ENODEV;
}
return 0;
@@ -333,13 +334,12 @@ static int b44_phy_reset(struct b44 *bp)
err = b44_readphy(bp, MII_BMCR, &val);
if (!err) {
if (val & BMCR_RESET) {
- printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
- bp->dev->name);
+ netdev_err(bp->dev, "PHY Reset would not complete\n");
err = -ENODEV;
}
}
- return 0;
+ return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
@@ -413,7 +413,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
}
return;
error:
- printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
+ pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
@@ -506,18 +506,15 @@ static void b44_stats_update(struct b44 *bp)
static void b44_link_report(struct b44 *bp)
{
if (!netif_carrier_ok(bp->dev)) {
- printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
+ netdev_info(bp->dev, "Link is down\n");
} else {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- bp->dev->name,
- (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
- (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
-
- printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
- "%s for RX.\n",
- bp->dev->name,
- (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
- (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+ netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+ (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+ netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
+ (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+ (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
}
}
@@ -576,11 +573,9 @@ static void b44_check_phy(struct b44 *bp)
}
if (bmsr & BMSR_RFAULT)
- printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "Remote fault detected in PHY\n");
if (bmsr & BMSR_JCD)
- printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "Jabber detected in PHY\n");
}
}
@@ -815,7 +810,7 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = dev_alloc_skb(len + 2);
+ copy_skb = netdev_alloc_skb(bp->dev, len + 2);
if (copy_skb == NULL)
goto drop_it_no_recycle;
@@ -901,7 +896,7 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
handled = 1;
if (unlikely(!netif_running(dev))) {
- printk(KERN_INFO "%s: late interrupt.\n", dev->name);
+ netdev_info(dev, "late interrupt\n");
goto irq_ack;
}
@@ -926,8 +921,7 @@ static void b44_tx_timeout(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
spin_lock_irq(&bp->lock);
@@ -956,8 +950,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
netif_stop_queue(dev);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_out;
}
@@ -1333,7 +1326,7 @@ static void b44_halt(struct b44 *bp)
/* reset PHY */
b44_phy_reset(bp);
/* power down PHY */
- printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
+ netdev_info(bp->dev, "powering down PHY\n");
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
@@ -1524,7 +1517,7 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
if (!pwol_pattern) {
- printk(KERN_ERR PFX "Memory not available for WOL\n");
+ pr_err("Memory not available for WOL\n");
return;
}
@@ -1691,10 +1684,12 @@ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
struct dev_mc_list *mclist;
int i, num_ents;
- num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
- mclist = dev->mc_list;
- for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
- __b44_cam_write(bp, mclist->dmi_addr, i + 1);
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (i == num_ents)
+ break;
+ __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
}
return i+1;
}
@@ -1716,7 +1711,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_set_mac_addr(bp);
if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > B44_MCAST_TABLE_SIZE))
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
@@ -2097,7 +2092,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
memcpy(bp->dev->dev_addr, addr, 6);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
- printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
+ pr_err("Invalid MAC address found in EEPROM\n");
return -EINVAL;
}
@@ -2142,12 +2137,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
instance++;
if (b44_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
- dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
+ dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto out;
}
@@ -2186,13 +2181,13 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
if (err) {
dev_err(sdev->dev,
- "Required 30BIT DMA mask unsupported by the system.\n");
+ "Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
}
err = b44_get_invariants(bp);
if (err) {
dev_err(sdev->dev,
- "Problem fetching invariants of chip, aborting.\n");
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_powerdown;
}
@@ -2212,7 +2207,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
err = register_netdev(dev);
if (err) {
- dev_err(sdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(sdev->dev, "Cannot register net device, aborting\n");
goto err_out_powerdown;
}
@@ -2223,8 +2218,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
*/
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
- printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
- dev->name, dev->dev_addr);
+ /* do a phy reset to test if there is an active phy */
+ if (b44_phy_reset(bp) < 0)
+ bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+
+ netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
@@ -2297,7 +2296,7 @@ static int b44_resume(struct ssb_device *sdev)
rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+ netdev_err(dev, "request_irq failed\n");
return rc;
}
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0bd47d32ec42..17460aba3bae 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
@@ -619,7 +620,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
/* only 3 perfect match registers left, first one is used for
* own mac address */
- if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
@@ -631,16 +632,13 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
return;
}
- for (i = 0, mc_list = dev->mc_list;
- (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
- i++, mc_list = mc_list->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mc_list, dev) {
u8 *dmi_addr;
u32 tmp;
- /* filter non ethernet address */
- if (mc_list->dmi_addrlen != 6)
- continue;
-
+ if (i == 3)
+ break;
/* update perfect match registers */
dmi_addr = mc_list->dmi_addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
@@ -649,7 +647,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
tmp |= ENET_PMH_DATAVALID_MASK;
- enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
+ enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
}
for (; i < 3; i++) {
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
index fdb6e81a4374..1a41a49bb619 100644
--- a/drivers/net/benet/Kconfig
+++ b/drivers/net/benet/Kconfig
@@ -1,6 +1,6 @@
config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI && INET
help
This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine 2.
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014d27ed..56387b191c96 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -29,31 +29,30 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
+#include <linux/slab.h>
#include "be_hw.h"
-#define DRV_VER "2.101.346u"
+#define DRV_VER "2.102.147u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
-#define DRV_DESC BE_NAME "Driver"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x701
-#define OC_DEVICE_ID3 0x710
+#define OC_DEVICE_ID2 0x710
static inline char *nic_name(struct pci_dev *pdev)
{
switch (pdev->device) {
case OC_DEVICE_ID1:
- case OC_DEVICE_ID2:
return OC_NAME;
- case OC_DEVICE_ID3:
+ case OC_DEVICE_ID2:
return OC_NAME1;
case BE_DEVICE_ID2:
return BE3_NAME;
@@ -153,6 +152,7 @@ struct be_eq_obj {
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
+ bool rearm_cq;
};
struct be_drvr_stats {
@@ -165,6 +165,7 @@ struct be_drvr_stats {
ulong be_tx_jiffies;
u64 be_tx_bytes;
u64 be_tx_bytes_prev;
+ u64 be_tx_pkts;
u32 be_tx_rate;
u32 cache_barrier[16];
@@ -176,6 +177,7 @@ struct be_drvr_stats {
ulong be_rx_jiffies;
u64 be_rx_bytes;
u64 be_rx_bytes_prev;
+ u64 be_rx_pkts;
u32 be_rx_rate;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
@@ -252,7 +254,8 @@ struct be_adapter {
bool rx_post_starved; /* Zero rx frags have been posted to BE */
struct vlan_group *vlan_grp;
- u16 num_vlans;
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
struct be_dma_mem mc_cmd_mem;
@@ -266,6 +269,7 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
+ bool eeh_err;
bool link_up;
u32 port_num;
bool promiscuous;
@@ -275,17 +279,18 @@ struct be_adapter {
u32 tx_fc; /* Tx flow control */
int link_speed;
u8 port_type;
+ u8 transceiver;
+ u8 generation; /* BladeEngine ASIC generation */
};
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+
extern const struct ethtool_ops be_ethtool_ops;
#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
-static inline unsigned int be_pci_func(struct be_adapter *adapter)
-{
- return PCI_FUNC(adapter->pdev->devfn);
-}
-
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
#define PAGE_SHIFT_4K 12
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd98dc0c..d0ef4ac987cd 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -104,10 +104,26 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
return NULL;
}
-int be_process_mcc(struct be_adapter *adapter)
+void be_async_mcc_enable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
+ adapter->mcc_obj.rearm_cq = true;
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+void be_async_mcc_disable(struct be_adapter *adapter)
+{
+ adapter->mcc_obj.rearm_cq = false;
+}
+
+int be_process_mcc(struct be_adapter *adapter, int *status)
{
struct be_mcc_compl *compl;
- int num = 0, status = 0;
+ int num = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
@@ -119,31 +135,31 @@ int be_process_mcc(struct be_adapter *adapter)
be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&adapter->mcc_obj.q.used);
+ *status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
}
- if (num)
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
-
spin_unlock_bh(&adapter->mcc_cq_lock);
- return status;
+ return num;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
- int i, status;
+ int i, num, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
for (i = 0; i < mcc_timeout; i++) {
- status = be_process_mcc(adapter);
- if (status)
- return status;
+ num = be_process_mcc(adapter, &status);
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id,
+ mcc_obj->rearm_cq, num);
- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
+ if (atomic_read(&mcc_obj->q.used) == 0)
break;
udelay(100);
}
@@ -151,7 +167,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
return -1;
}
- return 0;
+ return status;
}
/* Notify MCC requests and wait for completion */
@@ -167,7 +183,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ ready = ioread32(db);
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
@@ -198,6 +221,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
val |= MPU_MAILBOX_DB_HI_MASK;
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -286,7 +314,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
wrb->tag0 = opcode;
- be_dws_cpu_to_le(wrb, 20);
+ be_dws_cpu_to_le(wrb, 8);
}
/* Don't touch the hdr after it's prepared */
@@ -296,6 +324,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -396,6 +425,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = (u8 *)wrb_from_mbox(adapter);
@@ -433,8 +465,6 @@ int be_cmd_eq_create(struct be_adapter *adapter,
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- AMAP_SET_BITS(struct amap_eq_context, func, req->context,
- be_pci_func(adapter));
AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
/* 4byte eqe*/
AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
@@ -597,7 +627,6 @@ int be_cmd_cq_create(struct be_adapter *adapter,
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -644,9 +673,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MCC_CREATE, sizeof(*req));
- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
be_encoded_q_len(mccq->len));
@@ -695,8 +723,6 @@ int be_cmd_txq_create(struct be_adapter *adapter,
AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
be_encoded_q_len(txq->len));
- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
- be_pci_func(adapter));
AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
@@ -768,6 +794,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -856,6 +885,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
struct be_cmd_req_if_destroy *req;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -1096,8 +1128,7 @@ err:
* (mc == NULL) => multicast promiscous
*/
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct dev_mc_list *mc_list, u32 mc_count,
- struct be_dma_mem *mem)
+ struct net_device *netdev, struct be_dma_mem *mem)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcast_mac_config *req = mem->va;
@@ -1124,13 +1155,14 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- if (mc_list) {
+ if (netdev) {
int i;
struct dev_mc_list *mc;
- req->num_mac = cpu_to_le16(mc_count);
+ req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ i = 0;
+ netdev_for_each_mc_addr(mc, netdev)
- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, mc->dmi_addr, ETH_ALEN);
} else {
req->promiscuous = 1;
@@ -1374,7 +1406,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req = cmd->va;
+ struct be_cmd_write_flashrom *req;
struct be_sge *sge;
int status;
@@ -1408,7 +1440,8 @@ err:
return status;
}
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -1429,10 +1462,10 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
- req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = 0x3FFFC;
- req->params.data_buf_size = 0x4;
+ req->params.offset = cpu_to_le32(offset);
+ req->params.data_buf_size = cpu_to_le32(0x4);
status = be_mcc_notify_wait(adapter);
if (!status)
@@ -1479,6 +1512,41 @@ err:
return status;
}
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
@@ -1501,6 +1569,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = 4;
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
@@ -1571,3 +1640,33 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef156ed..cce61f9a3714 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -155,6 +156,7 @@ struct be_mcc_mailbox {
#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
@@ -163,7 +165,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -821,6 +824,19 @@ struct be_cmd_resp_loopback_test {
u32 ticks_compl;
};
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
/********************** DDR DMA test *********************/
struct be_cmd_req_ddrdma_test {
struct be_cmd_req_hdr hdr;
@@ -840,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
u8 rcv_buff[4096];
};
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -883,8 +912,7 @@ extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
u8 port_num, bool en);
extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct dev_mc_list *mc_list, u32 mc_count,
- struct be_dma_mem *mem);
+ struct net_device *netdev, struct be_dma_mem *mem);
extern int be_cmd_set_flow_control(struct be_adapter *adapter,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -892,7 +920,7 @@ extern int be_cmd_get_flow_control(struct be_adapter *adapter,
extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
u32 *port_num, u32 *cap);
extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_process_mcc(struct be_adapter *adapter);
+extern int be_process_mcc(struct be_adapter *adapter, int *status);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
@@ -902,13 +930,21 @@ extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
-extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
extern int be_cmd_fw_init(struct be_adapter *adapter);
extern int be_cmd_fw_clean(struct be_adapter *adapter);
+extern void be_async_mcc_enable(struct be_adapter *adapter);
+extern void be_async_mcc_disable(struct be_adapter *adapter);
extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92cbd689..51e1065e7897 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -112,12 +112,14 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"PHY Loopback test",
"External Loopback test",
"DDR DMA test"
+ "Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +341,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_read_port_type(adapter, adapter->port_num,
&connector);
- switch (connector) {
- case 7:
- ecmd->port = PORT_FIBRE;
- break;
- default:
- ecmd->port = PORT_TP;
- break;
+ if (!status) {
+ switch (connector) {
+ case 7:
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 0:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_INTERNAL;
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
ecmd->phy_address = adapter->port_num;
- ecmd->transceiver = XCVR_INTERNAL;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
return 0;
}
@@ -466,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
- u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
@@ -489,37 +513,56 @@ err:
return ret;
}
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ bool link_up;
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_MAC_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[0] != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_PHY_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[1] != 0)
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_ONE_PORT_EXT_LOOPBACK,
- 1500, 2, 0xabc);
- if (data[2] != 0)
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
- data[3] = be_test_ddr_dma(adapter);
- if (data[3] != 0)
- test->flags |= ETH_TEST_FL_FAILED;
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
}
+ if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+ &qos_link_speed) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (mac_speed) {
+ data[4] = 1;
+ }
}
static int
@@ -536,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, file_name);
}
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
+ &eeprom_cmd.dma);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index e2b3beffd49d..2d4a4b827637 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -99,6 +99,64 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_FW 13
+
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144) /* Max NSCI image sz */
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_NCSI_START_g3 (15990784)
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+
+
+
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
@@ -107,6 +165,7 @@
#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
#define EQ_ENTRY_RES_ID_SHIFT 16
+
struct be_eq_entry {
u32 evt;
};
@@ -221,41 +280,6 @@ struct be_eth_rx_compl {
u32 dw[4];
};
-/* Flashrom related descriptors */
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
-#define FLASHROM_TYPE_REDBOOT 1
-#define FLASHROM_TYPE_BIOS 2
-#define FLASHROM_TYPE_PXE_BIOS 3
-#define FLASHROM_TYPE_FCOE_BIOS 8
-#define FLASHROM_TYPE_ISCSI_BACKUP 9
-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
-
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
-#define FLASH_iSCSI_BIOS_START (7340032)
-#define FLASH_PXE_BIOS_START (7864320)
-#define FLASH_FCoE_BIOS_START (524288)
-#define FLASH_REDBOOT_START (32768)
-#define FLASH_REDBOOT_ISM_START (0)
-
struct controller_id {
u32 vendor;
u32 device;
@@ -263,7 +287,20 @@ struct controller_id {
u32 subdevice;
};
-struct flash_file_hdr {
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
u8 sign[32];
u32 cksum;
u32 antidote;
@@ -275,6 +312,17 @@ struct flash_file_hdr {
u8 build[24];
};
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
struct flash_section_hdr {
u32 format_rev;
u32 cksum;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..ec6ace802256 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -34,7 +34,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -69,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
u32 reg = ioread32(addr);
u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (adapter->eeh_err)
+ return;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
@@ -100,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clear_int)
@@ -113,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -149,13 +159,10 @@ void netdev_stats_update(struct be_adapter *adapter)
struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
- dev_stats->rx_packets = port_stats->rx_total_frames;
- dev_stats->tx_packets = port_stats->tx_unicastframes +
- port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
- dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
- (u64) port_stats->rx_bytes_lsd;
- dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
- (u64) port_stats->tx_bytes_lsd;
+ dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
+ dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
+ dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
+ dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -312,12 +319,13 @@ static void be_tx_rate_update(struct be_adapter *adapter)
}
static void be_tx_stats_update(struct be_adapter *adapter,
- u32 wrb_cnt, u32 copied, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
+ stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
stats->be_tx_stops++;
}
@@ -462,7 +470,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+ be_tx_stats_update(adapter, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -474,10 +483,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
"MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -487,17 +498,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
}
/*
- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
- * set the BE in promiscuous VLAN mode.
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
*/
static int be_vid_config(struct be_adapter *adapter)
{
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
- int status;
+ int status = 0;
- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
+ if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (adapter->vlan_tag[i]) {
@@ -531,21 +541,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans++;
adapter->vlan_tag[vid] = 1;
-
- be_vid_config(adapter);
+ adapter->vlans_added++;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans--;
adapter->vlan_tag[vid] = 0;
-
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
- be_vid_config(adapter);
+ adapter->vlans_added--;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
@@ -565,14 +575,15 @@ static void be_set_multicast_list(struct net_device *netdev)
}
/* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
+ be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
&adapter->mc_cmd_mem);
goto done;
}
- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev->mc_count, &adapter->mc_cmd_mem);
+ be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
+ &adapter->mc_cmd_mem);
done:
return;
}
@@ -607,6 +618,7 @@ static void be_rx_stats_update(struct be_adapter *adapter,
stats->be_rx_compl++;
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
+ stats->be_rx_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -634,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user)
+ if (rx_page_info->last_page_user) {
pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ rx_page_info->last_page_user = false;
+ }
atomic_dec(&rxq->used);
return rx_page_info;
@@ -666,17 +680,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* indicated by rxcp.
*/
static void skb_fill_rx_data(struct be_adapter *adapter,
- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
+ struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
+ u16 num_rcvd)
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd, j;
+ u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -704,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->data_len = curr_frag_len - hdr_len;
skb->tail += hdr_len;
}
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
@@ -737,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -752,25 +766,23 @@ static void be_rx_compl_process(struct be_adapter *adapter,
{
struct sk_buff *skb;
u32 vlanf, vid;
+ u16 num_rcvd;
u8 vtm;
- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->cap & 0x400) && !vtm)
- vlanf = 0;
+ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is it a flush compl that has no data */
+ if (unlikely(num_rcvd == 0))
+ return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
- if (!skb) {
+ if (unlikely(!skb)) {
if (net_ratelimit())
dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
be_rx_compl_discard(adapter, rxcp);
return;
}
- skb_fill_rx_data(adapter, skb, rxcp);
+ skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
skb->ip_summed = CHECKSUM_NONE;
@@ -781,13 +793,21 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->dev = adapter->netdev;
- if (vlanf) {
- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
+ vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+ vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->cap & 0x400) && !vtm)
+ vlanf = 0;
+
+ if (unlikely(vlanf)) {
+ if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = be16_to_cpu(vid);
+ vid = swab16(vid);
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
} else {
netif_receive_skb(skb);
@@ -809,6 +829,10 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is it a flush compl that has no data */
+ if (unlikely(num_rcvd == 0))
+ return;
+
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -860,9 +884,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
napi_gro_frags(&eq_obj->napi);
} else {
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = be16_to_cpu(vid);
+ vid = swab16(vid);
- if (!adapter->vlan_grp || adapter->num_vlans == 0)
+ if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -910,7 +934,7 @@ static inline struct page *be_alloc_pages(u32 size)
static void be_post_rx_frags(struct be_adapter *adapter)
{
struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
- struct be_rx_page_info *page_info = NULL;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct page *pagep = NULL;
struct be_eth_rx_d *rxd;
@@ -941,7 +965,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
rxd = queue_head_node(rxq);
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
- queue_head_inc(rxq);
/* Any space left in the current big page for another frag? */
if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +972,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
pagep = NULL;
page_info->last_page_user = true;
}
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
page_info = &page_info_tbl[rxq->head];
}
if (pagep)
- page_info->last_page_user = true;
+ prev_page_info->last_page_user = true;
if (posted) {
atomic_add(posted, &rxq->used);
@@ -1102,6 +1128,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0;
+ struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
@@ -1125,6 +1154,15 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
if (atomic_read(&txq->used))
dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ be_tx_compl_process(adapter, end_idx);
+ }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1257,6 +1295,11 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
q = &adapter->rx_obj.q;
if (q->created) {
be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
+
+ /* After the rxq is invalidated, wait for a grace time
+ * of 1ms for all dma to end and the flush compl to arrive
+ */
+ mdelay(1);
be_rx_q_clean(adapter);
}
be_queue_free(adapter, q);
@@ -1339,7 +1382,7 @@ rx_eq_free:
/* There are 8 evt ids per func. Retruns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id - 8 * be_pci_func(adapter);
+ return eq_id % 8;
}
static irqreturn_t be_intx(int irq, void *dev)
@@ -1348,7 +1391,7 @@ static irqreturn_t be_intx(int irq, void *dev)
int isr;
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- be_pci_func(adapter) * CEV_ISR_SIZE);
+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
@@ -1426,23 +1469,38 @@ int be_poll_rx(struct napi_struct *napi, int budget)
return work_done;
}
-void be_process_tx(struct be_adapter *adapter)
+/* As TX and MCC share the same EQ check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
- u32 num_cmpl = 0;
+ int tx_compl = 0, mcc_compl, status = 0;
u16 end_idx;
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
+ wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
- num_cmpl++;
+ tx_compl++;
}
- if (num_cmpl) {
- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ napi_complete(napi);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ }
+
+ if (tx_compl) {
+ be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
/* As Tx wrbs have been freed up, wake up netdev queue if
* it was stopped due to lack of tx wrbs.
@@ -1453,24 +1511,8 @@ void be_process_tx(struct be_adapter *adapter)
}
drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ drvr_stats(adapter)->be_tx_compl += tx_compl;
}
-}
-
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
-{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
-
- napi_complete(napi);
-
- be_process_tx(adapter);
-
- be_process_mcc(adapter);
return 1;
}
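The new poll handler recovers the adapter from the napi struct through two container_of() steps and then drains TX and MCC completions without honouring the NAPI budget. A stripped-down sketch of that pattern follows; the structure and field names here are stand-ins, not the driver's own definitions.

#include <linux/netdevice.h>

/* Sketch: recovering per-adapter state from an embedded napi_struct in a
 * poll handler that always consumes all available work and returns 1.
 */
struct example_eq_obj {
	struct napi_struct napi;
};

struct example_adapter {
	struct example_eq_obj tx_eq;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_eq_obj *eq = container_of(napi, struct example_eq_obj, napi);
	struct example_adapter *ad = container_of(eq, struct example_adapter, tx_eq);

	/* budget is intentionally ignored: TX/MCC work is not budget-limited */
	(void)ad;	/* ... drain TX and MCC completion queues here ... */

	napi_complete(napi);
	return 1;
}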
@@ -1639,6 +1681,9 @@ static int be_open(struct net_device *netdev)
/* Rx compl queue may be in unarmed state; rearm it */
be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
+ /* Now that interrupts are on, we can process async mcc */
+ be_async_mcc_enable(adapter);
+
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -1764,6 +1809,8 @@ static int be_close(struct net_device *netdev)
cancel_delayed_work_sync(&adapter->work);
+ be_async_mcc_disable(adapter);
+
netif_stop_queue(netdev);
netif_carrier_off(netdev);
adapter->link_up = false;
@@ -1796,15 +1843,19 @@ char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
int status;
- crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
- + sizeof(struct flash_file_hdr) - 32*1024;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
+
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc);
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
"could not get crc from flash, not flashing redboot\n");
@@ -1816,112 +1867,133 @@ static bool be_flash_redboot(struct be_adapter *adapter,
return false;
else
return true;
-
}
-static int be_flash_image(struct be_adapter *adapter,
+static int be_flash_data(struct be_adapter *adapter,
const struct firmware *fw,
- struct be_dma_mem *flash_cmd, u32 flash_type)
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status;
- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
-
- switch (flash_type) {
- case FLASHROM_TYPE_ISCSI_ACTIVE:
- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_ISCSI_BACKUP:
- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_BACKUP:
- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_BIOS:
- image_offset = FLASH_iSCSI_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_BIOS:
- image_offset = FLASH_FCoE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_PXE_BIOS:
- image_offset = FLASH_PXE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_REDBOOT:
- if (!be_flash_redboot(adapter, fw->data))
- return 0;
- image_offset = FLASH_REDBOOT_ISM_START;
- image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
- break;
- default:
- return 0;
+ struct flash_comp *pflashcomp;
+ int num_comp;
+
+ struct flash_comp gen3_flash_types[9] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
+ FLASH_NCSI_IMAGE_MAX_SIZE_g3}
+ };
+ struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = 9;
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = 8;
}
-
- p += sizeof(struct flash_file_hdr) + image_offset;
- if (p + image_size > fw->data + fw->size)
+ for (i = 0; i < num_comp; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ continue;
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size)))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
-
- total_bytes = image_size;
-
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
-
- if (!total_bytes)
- flash_op = FLASHROM_OPER_FLASH;
- else
- flash_op = FLASHROM_OPER_SAVE;
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- flash_type, flash_op, num_bytes);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed. type/op %d/%d\n",
- flash_type, flash_op);
- return -1;
+ total_bytes = pflashcomp[i].size;
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+
+ if (!total_bytes)
+ flash_op = FLASHROM_OPER_FLASH;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ yield();
}
- yield();
}
-
return 0;
}
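Each entry in the per-generation tables above is simply (flash offset, operation type, max size); the byte position of a component inside the UFI file is then the file header, all image headers, and the component's own flash offset added together. A small sketch of that offset arithmetic, with illustrative names rather than the real header structs:

#include <linux/types.h>

/* Illustrative UFI offset math only: the real sizes come from
 * struct flash_file_hdr_g2/g3 and struct image_hdr.
 */
static const u8 *example_comp_start(const u8 *ufi, size_t filehdr_size,
				    size_t img_hdr_size, int num_imgs,
				    u32 comp_offset)
{
	return ufi + filehdr_size + num_imgs * img_hdr_size + comp_offset;
}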
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
const struct firmware *fw;
- struct flash_file_hdr *fhdr;
- struct flash_section_info *fsec = NULL;
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
- int status;
+ int status, i = 0, num_imgs = 0;
const u8 *p;
- bool entry_found = false;
- int flash_type;
- char fw_ver[FW_VER_LEN];
- char fw_cfg;
-
- status = be_cmd_get_fw_ver(adapter, fw_ver);
- if (status)
- return status;
- fw_cfg = *(fw_ver + 2);
- if (fw_cfg == '0')
- fw_cfg = '1';
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -1929,34 +2001,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
p = fw->data;
- fhdr = (struct flash_file_hdr *) p;
- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
- dev_err(&adapter->pdev->dev,
- "Firmware(%s) load error (signature did not match)\n",
- fw_file);
- status = -1;
- goto fw_exit;
- }
-
+ fhdr = (struct flash_file_hdr_g2 *) p;
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
- p += sizeof(struct flash_file_hdr);
- while (p < (fw->data + fw->size)) {
- fsec = (struct flash_section_info *)p;
- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
- entry_found = true;
- break;
- }
- p += 32;
- }
-
- if (!entry_found) {
- status = -1;
- dev_err(&adapter->pdev->dev,
- "Flash cookie not found in firmware image\n");
- goto fw_exit;
- }
-
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
&flash_cmd.dma);
@@ -1967,12 +2014,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
}
- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
- status = be_flash_image(adapter, fw, &flash_cmd,
- flash_type);
- if (status)
- break;
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
+ status = be_flash_data(adapter, fw, &flash_cmd,
+ num_imgs);
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
}
pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -2049,6 +2109,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
+ int pcicfg_reg;
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@@ -2062,8 +2123,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
goto pci_map_err;
adapter->db = addr;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
- pci_resource_len(adapter->pdev, 1));
+ if (adapter->generation == BE_GEN2)
+ pcicfg_reg = 1;
+ else
+ pcicfg_reg = 0;
+
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
if (addr == NULL)
goto pci_map_err;
adapter->pcicfg = addr;
@@ -2128,6 +2194,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ pci_save_state(adapter->pdev);
return 0;
free_mbox:
@@ -2160,6 +2227,7 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
if (cmd->va == NULL)
return -1;
+ memset(cmd->va, 0, cmd->size);
return 0;
}
@@ -2213,6 +2281,11 @@ static int be_get_config(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (adapter->cap & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
return 0;
}
@@ -2238,6 +2311,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
@@ -2371,13 +2458,123 @@ static int be_resume(struct pci_dev *pdev)
return 0;
}
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ be_cmd_reset_function(adapter);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ return;
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume
+ .resume = be_resume,
+ .shutdown = be_shutdown,
+ .err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea3990d07..587f93cf03f6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
u32 sclk, mdc_div;
/* Enable PHY output early */
- if (!(bfin_read_VR_CTL() & PHYCLKOE))
- bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+ if (!(bfin_read_VR_CTL() & CLKBUFOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
sclk = get_sclk();
mdc_div = ((sclk / MDC_CLK) / 2) - 1;
@@ -811,16 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev)
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* skip non-multicast addresses */
if (!(*addrs & 1))
@@ -861,7 +860,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
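This hunk is one instance of a conversion repeated across several drivers in this diff (bfin_mac, bmac, bnx2): the open-coded walk over dev->mc_list/dev->mc_count is replaced by the netdev_for_each_mc_addr()/netdev_mc_count()/netdev_mc_empty() helpers. A minimal sketch of the new pattern, using the dev_mc_list iterator exactly as the hunks above do:

#include <linux/netdevice.h>

/* Sketch of the helper-based multicast walk the manual list traversal
 * is converted to in this series.
 */
static void example_walk_mc(struct net_device *dev)
{
	struct dev_mc_list *dmi;

	if (netdev_mc_empty(dev))
		return;				/* nothing programmed */

	netdev_for_each_mc_addr(dmi, dev) {
		const u8 *addr = dmi->dmi_addr;	/* 6-byte multicast address */

		if (!(addr[0] & 1))
			continue;		/* skip non-multicast entries, as above */
		/* ... hash or program 'addr' ... */
	}
}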
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9b587c344194..598b007f1991 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -20,6 +20,7 @@
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
+#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
@@ -973,7 +974,7 @@ static void bmac_set_multicast(struct net_device *dev)
{
struct dev_mc_list *dmi;
struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
int i;
@@ -982,7 +983,7 @@ static void bmac_set_multicast(struct net_device *dev)
XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1000,7 +1001,7 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
bmac_addhash(bp, dmi->dmi_addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1015,13 +1016,13 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i;
unsigned short rx_cfg;
u32 crc;
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
bmwrite(dev, BHASH0, 0xffff);
bmwrite(dev, BHASH1, 0xffff);
bmwrite(dev, BHASH2, 0xffff);
@@ -1039,9 +1040,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- for(i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if(!(*addrs & 1))
continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e4..ac90a3828f69 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -9,6 +9,7 @@
* Written by: Michael Chan (mchan@broadcom.com)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -48,7 +49,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/list.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -58,14 +58,13 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.3"
-#define DRV_MODULE_RELDATE "Dec 03, 2009"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
+#define DRV_MODULE_VERSION "2.0.9"
+#define DRV_MODULE_RELDATE "April 27, 2010"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
+#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
#define RUN_AT(x) (jiffies + (x))
@@ -247,6 +246,8 @@ static const struct flash_spec flash_5709 = {
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+static void bnx2_init_napi(struct bnx2 *bp);
+
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
@@ -650,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
}
static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
- bnx2_cnic_stop(bp);
+ if (stop_cnic)
+ bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
int i;
@@ -670,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
}
static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
- bnx2_cnic_start(bp);
+ if (start_cnic)
+ bnx2_cnic_start(bp);
}
}
}
@@ -980,33 +983,27 @@ bnx2_report_link(struct bnx2 *bp)
{
if (bp->link_up) {
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
- bnx2_xceiver_str(bp));
-
- printk("%d Mbps ", bp->line_speed);
-
- if (bp->duplex == DUPLEX_FULL)
- printk("full duplex");
- else
- printk("half duplex");
+ netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+ bnx2_xceiver_str(bp),
+ bp->line_speed,
+ bp->duplex == DUPLEX_FULL ? "full" : "half");
if (bp->flow_ctrl) {
if (bp->flow_ctrl & FLOW_CTRL_RX) {
- printk(", receive ");
+ pr_cont(", receive ");
if (bp->flow_ctrl & FLOW_CTRL_TX)
- printk("& transmit ");
+ pr_cont("& transmit ");
}
else {
- printk(", transmit ");
+ pr_cont(", transmit ");
}
- printk("flow control ON");
+ pr_cont("flow control ON");
}
- printk("\n");
- }
- else {
+ pr_cont("\n");
+ } else {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
- bnx2_xceiver_str(bp));
+ netdev_err(bp->dev, "NIC %s Link is Down\n",
+ bnx2_xceiver_str(bp));
}
bnx2_report_fw_link(bp);
@@ -1278,7 +1275,7 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
if (lo_water >= bp->rx_ring_size)
lo_water = 0;
- hi_water = bp->rx_ring_size / 4;
+ hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
if (hi_water <= lo_water)
lo_water = 0;
@@ -2483,8 +2480,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
/* If we timed out, inform the firmware that this is the case. */
if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
if (!silent)
- printk(KERN_ERR PFX "fw sync timeout, reset code = "
- "%x\n", msg_data);
+ pr_err("fw sync timeout, reset code = %x\n", msg_data);
msg_data &= ~BNX2_DRV_MSG_CODE;
msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
@@ -2600,8 +2596,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
if (good_mbuf == NULL) {
- printk(KERN_ERR PFX "Failed to allocate memory in "
- "bnx2_alloc_bad_rbuf\n");
+ pr_err("Failed to allocate memory in %s\n", __func__);
return -ENOMEM;
}
@@ -3561,9 +3556,7 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
@@ -3579,14 +3572,14 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
+ if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
/* Add all entries into the match filter list */
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
@@ -3657,15 +3650,13 @@ bnx2_request_firmware(struct bnx2 *bp)
rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
- mips_fw_file);
+ pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
return rc;
}
rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
- rv2p_fw_file);
+ pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
return rc;
}
mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
@@ -3676,15 +3667,13 @@ bnx2_request_firmware(struct bnx2 *bp)
check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
- printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
- mips_fw_file);
+ pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
return -EINVAL;
}
if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
- printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
- rv2p_fw_file);
+ pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
return -EINVAL;
}
@@ -4318,7 +4307,7 @@ bnx2_init_nvram(struct bnx2 *bp)
if (j == entry_count) {
bp->flash_info = NULL;
- printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
+ pr_alert("Unknown flash/EEPROM type\n");
return -ENODEV;
}
@@ -4738,7 +4727,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
- printk(KERN_ERR PFX "Chip reset did not complete\n");
+ pr_err("Chip reset did not complete\n");
return -EBUSY;
}
}
@@ -4746,7 +4735,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Make sure byte swapping is properly configured. */
val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
if (val != 0x01020304) {
- printk(KERN_ERR PFX "Chip not in correct endian mode\n");
+ pr_err("Chip not in correct endian mode\n");
return -ENODEV;
}
@@ -4772,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
rc = bnx2_alloc_bad_rbuf(bp);
}
- if (bp->flags & BNX2_FLAG_USING_MSIX)
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
+ /* Prevent MSIX table reads and writes from timing out */
+ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+ }
return rc;
}
@@ -4941,7 +4934,7 @@ bnx2_init_chip(struct bnx2 *bp)
BNX2_HC_CONFIG_COLLECT_STATS;
}
- if (bp->irq_nvecs > 1) {
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
BNX2_HC_MSIX_BIT_VECTOR_VAL);
@@ -5167,9 +5160,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
- printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
- "with %d/%d pages only\n",
- bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
+ netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+ ring_num, i, bp->rx_pg_ring_size);
break;
}
prod = NEXT_RX_BD(prod);
@@ -5180,9 +5172,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
- printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
- "%d/%d skbs only\n",
- bp->dev->name, ring_num, i, bp->rx_ring_size);
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_num, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX_BD(prod);
@@ -6145,6 +6136,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ /* Need to flush the previous three writes to ensure MSI-X
+ * is set up properly */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
@@ -6210,6 +6205,7 @@ bnx2_open(struct net_device *dev)
bnx2_disable_int(bp);
bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_init_napi(bp);
bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
@@ -6227,6 +6223,8 @@ bnx2_open(struct net_device *dev)
atomic_set(&bp->intr_sem, 0);
+ memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
bnx2_enable_int(bp);
if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6234,11 +6232,7 @@ bnx2_open(struct net_device *dev)
* If MSI test fails, go back to INTx mode
*/
if (bnx2_test_intr(bp) != 0) {
- printk(KERN_WARNING PFX "%s: No interrupt was generated"
- " using MSI, switching to INTx mode. Please"
- " report this failure to the PCI maintainer"
- " and include system chipset information.\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
bnx2_disable_int(bp);
bnx2_free_irq(bp);
@@ -6258,9 +6252,9 @@ bnx2_open(struct net_device *dev)
}
}
if (bp->flags & BNX2_FLAG_USING_MSI)
- printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
+ netdev_info(dev, "using MSI\n");
else if (bp->flags & BNX2_FLAG_USING_MSIX)
- printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
+ netdev_info(dev, "using MSIX\n");
netif_tx_start_all_queues(dev);
@@ -6285,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work)
return;
}
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
rtnl_unlock();
}
@@ -6299,20 +6293,18 @@ bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
- printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
- atomic_read(&bp->intr_sem));
- printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
- "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
- REG_RD(bp, BNX2_EMAC_TX_STATUS),
- REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
- printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
- printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
- dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+ netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+ REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+ netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+ REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
- printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
- REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+ netdev_err(dev, "DEBUG: PBA[%08x]\n",
+ REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
static void
@@ -6334,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev))
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
@@ -6345,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, false);
}
#endif
@@ -6376,8 +6368,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(bnx2_tx_avail(bp, txr) <
(skb_shinfo(skb)->nr_frags + 1))) {
netif_tx_stop_queue(txq);
- printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -6538,92 +6529,121 @@ bnx2_close(struct net_device *dev)
return 0;
}
-#define GET_NET_STATS64(ctr) \
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ int i;
+
+ /* The first 10 counters are 64-bit counters */
+ for (i = 0; i < 20; i += 2) {
+ u32 hi;
+ u64 lo;
+
+ hi = temp_stats[i] + hw_stats[i];
+ lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+ if (lo > 0xffffffff)
+ hi++;
+ temp_stats[i] = hi;
+ temp_stats[i + 1] = lo & 0xffffffff;
+ }
+
+ for ( ; i < sizeof(struct statistics_block) / 4; i++)
+ temp_stats[i] += hw_stats[i];
+}
+
+#define GET_64BIT_NET_STATS64(ctr) \
(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
(unsigned long) (ctr##_lo)
-#define GET_NET_STATS32(ctr) \
+#define GET_64BIT_NET_STATS32(ctr) \
(ctr##_lo)
#if (BITS_PER_LONG == 64)
-#define GET_NET_STATS GET_NET_STATS64
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
-#define GET_NET_STATS GET_NET_STATS32
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif
+#define GET_32BIT_NET_STATS(ctr) \
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
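bnx2_save_stats() and the reworked GET_*_NET_STATS macros implement one idea: the chip exports each 64-bit counter as a (hi, lo) pair of u32 words, a software copy accumulates them across chip resets, and every read returns the sum of the live and saved copies. The sketch below restates the carry handling on its own; it is a worked example, not driver code.

#include <linux/types.h>

/* Worked example of the hi/lo accumulation in bnx2_save_stats(): add the
 * low words in 64 bits and carry into the high word on overflow, so the
 * saved copy never loses counts across a reset.
 */
static void example_accumulate_pairs(u32 *saved, const u32 *hw, int pairs)
{
	int i;

	for (i = 0; i < pairs * 2; i += 2) {
		u32 hi = saved[i] + hw[i];
		u64 lo = (u64)saved[i + 1] + (u64)hw[i + 1];

		if (lo > 0xffffffff)
			hi++;			/* carry from the low 32 bits */

		saved[i] = hi;
		saved[i + 1] = (u32)(lo & 0xffffffff);
	}
}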
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- struct statistics_block *stats_blk = bp->stats_blk;
struct net_device_stats *net_stats = &dev->stats;
if (bp->stats_blk == NULL) {
return net_stats;
}
net_stats->rx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
net_stats->tx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
net_stats->rx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCInOctets);
+ GET_64BIT_NET_STATS(stat_IfHCInOctets);
net_stats->tx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
+ GET_64BIT_NET_STATS(stat_IfHCOutOctets);
net_stats->multicast =
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
net_stats->collisions =
- (unsigned long) stats_blk->stat_EtherStatsCollisions;
+ GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
net_stats->rx_length_errors =
- (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
- stats_blk->stat_EtherStatsOverrsizePkts);
+ GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+ GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
net_stats->rx_over_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
net_stats->rx_frame_errors =
- (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
net_stats->rx_crc_errors =
- (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
net_stats->rx_errors = net_stats->rx_length_errors +
net_stats->rx_over_errors + net_stats->rx_frame_errors +
net_stats->rx_crc_errors;
net_stats->tx_aborted_errors =
- (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
- stats_blk->stat_Dot3StatsLateCollisions);
+ GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+ GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
(CHIP_ID(bp) == CHIP_ID_5708_A0))
net_stats->tx_carrier_errors = 0;
else {
net_stats->tx_carrier_errors =
- (unsigned long)
- stats_blk->stat_Dot3StatsCarrierSenseErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
}
net_stats->tx_errors =
- (unsigned long)
- stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
- +
+ GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
net_stats->tx_aborted_errors +
net_stats->tx_carrier_errors;
net_stats->rx_missed_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
return net_stats;
}
@@ -6717,32 +6737,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
- cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
-
- /* allow advertising 1 speed */
- if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
- (cmd->advertising == ADVERTISED_10baseT_Full) ||
- (cmd->advertising == ADVERTISED_100baseT_Half) ||
- (cmd->advertising == ADVERTISED_100baseT_Full)) {
-
- if (cmd->port == PORT_FIBRE)
- goto err_out_unlock;
-
- advertising = cmd->advertising;
-
- } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
- if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
- (cmd->port == PORT_TP))
- goto err_out_unlock;
- } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
- advertising = cmd->advertising;
- else if (cmd->advertising == ADVERTISED_1000baseT_Half)
- goto err_out_unlock;
- else {
- if (cmd->port == PORT_FIBRE)
- advertising = ETHTOOL_ALL_FIBRE_SPEED;
- else
+ advertising = cmd->advertising;
+ if (cmd->port == PORT_TP) {
+ advertising &= ETHTOOL_ALL_COPPER_SPEED;
+ if (!advertising)
advertising = ETHTOOL_ALL_COPPER_SPEED;
+ } else {
+ advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+ if (!advertising)
+ advertising = ETHTOOL_ALL_FIBRE_SPEED;
}
advertising |= ADVERTISED_Autoneg;
}
@@ -7054,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 0);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
return 0;
@@ -7083,7 +7086,10 @@ static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
if (netif_running(bp->dev)) {
- bnx2_netif_stop(bp);
+ /* Reset will erase chipset stats; save them */
+ bnx2_save_stats(bp);
+
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
@@ -7104,7 +7110,14 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
dev_close(bp->dev);
return rc;
}
- bnx2_netif_start(bp);
+#ifdef BCM_CNIC
+ mutex_lock(&bp->cnic_lock);
+ /* Let cnic know about the new status block. */
+ if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+ bnx2_setup_cnic_irq_info(bp);
+ mutex_unlock(&bp->cnic_lock);
+#endif
+ bnx2_netif_start(bp, true);
}
return 0;
}
@@ -7357,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
@@ -7376,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_shutdown_chip(bp);
else {
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
/* wait for link up */
@@ -7427,6 +7440,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
struct bnx2 *bp = netdev_priv(dev);
int i;
u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
u8 *stats_len_arr = NULL;
if (hw_stats == NULL) {
@@ -7443,21 +7457,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
stats_len_arr = bnx2_5708_stats_len_arr;
for (i = 0; i < BNX2_NUM_STATS; i++) {
+ unsigned long offset;
+
if (stats_len_arr[i] == 0) {
/* skip this counter */
buf[i] = 0;
continue;
}
+
+ offset = bnx2_stats_offset_arr[i];
if (stats_len_arr[i] == 4) {
/* 4-byte counter */
- buf[i] = (u64)
- *(hw_stats + bnx2_stats_offset_arr[i]);
+ buf[i] = (u64) *(hw_stats + offset) +
+ *(temp_stats + offset);
continue;
}
/* 8-byte counter */
- buf[i] = (((u64) *(hw_stats +
- bnx2_stats_offset_arr[i])) << 32) +
- *(hw_stats + bnx2_stats_offset_arr[i] + 1);
+ buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+ *(hw_stats + offset + 1) +
+ (((u64) *(temp_stats + offset)) << 32) +
+ *(temp_stats + offset + 1);
}
}
@@ -7625,7 +7644,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
@@ -7633,9 +7652,11 @@ poll_bnx2(struct net_device *dev)
int i;
for (i = 0; i < bp->irq_nvecs; i++) {
- disable_irq(bp->irq_tbl[i].vector);
- bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
- enable_irq(bp->irq_tbl[i].vector);
+ struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, &bp->bnx2_napi[i]);
+ enable_irq(irq->vector);
}
}
#endif
@@ -7733,10 +7754,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
- int rc, i, v0_len = 0;
+ int rc, i, j;
u8 *data;
- u8 *v0_str = NULL;
- bool mn_match = false;
+ unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -7758,53 +7778,42 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
data[i + 3] = data[i + BNX2_VPD_LEN];
}
- for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
- unsigned char val = data[i];
- unsigned int block_end;
-
- if (val == 0x82 || val == 0x91) {
- i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
- continue;
- }
-
- if (val != 0x90)
- goto vpd_done;
+ i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto vpd_done;
- block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
- i += 3;
+ rosize = pci_vpd_lrdt_size(&data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ block_end = i + rosize;
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
+ if (block_end > BNX2_VPD_LEN)
+ goto vpd_done;
- while (i < (block_end - 2)) {
- int len = data[i + 2];
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j < 0)
+ goto vpd_done;
- if (i + 3 + len > block_end)
- goto vpd_done;
+ len = pci_vpd_info_field_size(&data[j]);
- if (data[i] == 'M' && data[i + 1] == 'N') {
- if (len != 4 ||
- memcmp(&data[i + 3], "1028", 4))
- goto vpd_done;
- mn_match = true;
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&data[j], "1028", 4))
+ goto vpd_done;
- } else if (data[i] == 'V' && data[i + 1] == '0') {
- if (len > BNX2_MAX_VER_SLEN)
- goto vpd_done;
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto vpd_done;
- v0_len = len;
- v0_str = &data[i + 3];
- }
- i += 3 + len;
+ len = pci_vpd_info_field_size(&data[j]);
- if (mn_match && v0_str) {
- memcpy(bp->fw_version, v0_str, v0_len);
- bp->fw_version[v0_len] = ' ';
- goto vpd_done;
- }
- }
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
goto vpd_done;
- }
+
+ memcpy(bp->fw_version, &data[j], len);
+ bp->fw_version[len] = ' ';
vpd_done:
kfree(data);
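The rewrite above drops the hand-rolled VPD tag walk in favour of the pci_vpd_* helpers: find the read-only LRDT, bound it, then look up keywords inside it. The sketch below shows the same sequence reading an arbitrary keyword; "PN" is used purely as an example and the buffer handling is simplified.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>

/* Sketch of the pci_vpd_* lookup sequence used above; keyword choice
 * and error handling are illustrative.
 */
static int example_read_vpd_keyword(const u8 *vpd, unsigned int vpd_len,
				    char *out, unsigned int out_len)
{
	int i, j;
	unsigned int rosize, block_end, len;

	i = pci_vpd_find_tag(vpd, 0, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -ENOENT;

	rosize = pci_vpd_lrdt_size(&vpd[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;
	if (block_end > vpd_len)
		return -EINVAL;

	j = pci_vpd_find_info_keyword(vpd, i, rosize, "PN");
	if (j < 0)
		return -ENOENT;

	len = pci_vpd_info_field_size(&vpd[j]);
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len + 1 > out_len)
		return -EINVAL;

	memcpy(out, &vpd[j], len);
	out[len] = '\0';
	return len;
}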
@@ -7825,23 +7834,31 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags = 0;
bp->phy_flags = 0;
+ bp->temp_stats_blk =
+ kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+ if (bp->temp_stats_blk == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev,
- "Cannot find PCI device base address, aborting.\n");
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -7851,7 +7868,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
dev_err(&pdev->dev,
- "Cannot find power management capability, aborting.\n");
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -7874,7 +7891,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->regview = ioremap_nocache(dev->base_addr, mem_len);
if (!bp->regview) {
- dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
+ dev_err(&pdev->dev, "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -7894,7 +7911,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
dev_err(&pdev->dev,
- "Cannot find PCIE capability, aborting.\n");
+ "Cannot find PCIE capability, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
@@ -7905,7 +7922,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
dev_err(&pdev->dev,
- "Cannot find PCIX capability, aborting.\n");
+ "Cannot find PCIX capability, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
@@ -7934,11 +7951,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting.\n");
+ "pci_set_consistent_dma_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
- dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
goto err_out_unmap;
}
@@ -7955,7 +7972,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
!(bp->flags & BNX2_FLAG_PCIX)) {
dev_err(&pdev->dev,
- "5706 A1 can only be used in a PCIX bus, aborting.\n");
+ "5706 A1 can only be used in a PCIX bus, aborting\n");
goto err_out_unmap;
}
@@ -7978,7 +7995,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
BNX2_DEV_INFO_SIGNATURE_MAGIC) {
- dev_err(&pdev->dev, "Firmware not running, aborting.\n");
+ dev_err(&pdev->dev, "Firmware not running, aborting\n");
rc = -ENODEV;
goto err_out_unmap;
}
@@ -8201,7 +8218,7 @@ bnx2_init_napi(struct bnx2 *bp)
{
int i;
- for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ for (i = 0; i < bp->irq_nvecs; i++) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
int (*poll)(struct napi_struct *, int);
@@ -8229,7 +8246,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2,
#endif
};
@@ -8251,7 +8268,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
char str[40];
if (version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
@@ -8270,7 +8287,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnx2_ethtool_ops;
bp = netdev_priv(dev);
- bnx2_init_napi(bp);
pci_set_drvdata(pdev, dev);
@@ -8301,15 +8317,13 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error;
}
- printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
- "IRQ %d, node addr %pM\n",
- dev->name,
- board_info[ent->driver_data].name,
- ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
- ((CHIP_ID(bp) & 0x0ff0) >> 4),
- bnx2_bus_string(bp, str),
- dev->base_addr,
- bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+ ((CHIP_ID(bp) & 0x0ff0) >> 4),
+ bnx2_bus_string(bp, str),
+ dev->base_addr,
+ bp->pdev->irq, dev->dev_addr);
return 0;
@@ -8346,6 +8360,8 @@ bnx2_remove_one(struct pci_dev *pdev)
if (bp->regview)
iounmap(bp->regview);
+ kfree(bp->temp_stats_blk);
+
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -8367,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
flush_scheduled_work();
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
@@ -8389,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
return 0;
}
@@ -8416,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
del_timer_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8442,7 +8458,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset.\n");
+ "Cannot re-enable PCI device after reset\n");
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -8473,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(dev))
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
netif_device_attach(dev);
rtnl_unlock();
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a0..cd4b0e4637ab 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -349,7 +349,7 @@ struct l2_fhdr {
#define BNX2_L2CTX_BD_PRE_READ 0x00000000
#define BNX2_L2CTX_CTX_SIZE 0x00000000
#define BNX2_L2CTX_CTX_TYPE 0x00000000
-#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 32
+#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4
#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
@@ -6851,6 +6851,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
struct statistics_block *stats_blk;
+ struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
int ctx_pages;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 602ab86b6392..3c48a7a68308 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -44,7 +44,6 @@
/* error/debug prints */
#define DRV_MODULE_NAME "bnx2x"
-#define PFX DRV_MODULE_NAME ": "
/* for messages that are currently off */
#define BNX2X_MSG_OFF 0
@@ -58,30 +57,40 @@
#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
/* regular debug print */
-#define DP(__mask, __fmt, __args...) do { \
- if (bp->msglevel & (__mask)) \
- printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define DP(__mask, __fmt, __args...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* errors debug print */
-#define BNX2X_DBG_ERR(__fmt, __args...) do { \
- if (bp->msglevel & NETIF_MSG_PROBE) \
- printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define BNX2X_DBG_ERR(__fmt, __args...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ pr_err("[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* for errors (never masked) */
-#define BNX2X_ERR(__fmt, __args...) do { \
- printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define BNX2X_ERR(__fmt, __args...) \
+do { \
+ pr_err("[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* before we have a dev->name use dev_info() */
-#define BNX2X_DEV_INFO(__fmt, __args...) do { \
- if (bp->msglevel & NETIF_MSG_PROBE) \
- dev_info(&bp->pdev->dev, __fmt, ##__args); \
- } while (0)
+#define BNX2X_DEV_INFO(__fmt, __args...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ dev_info(&bp->pdev->dev, __fmt, ##__args); \
+} while (0)
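The reworked macros key off bp->msg_enable through the standard netif_msg_*() accessors instead of the old msglevel field, so verbosity can be driven by ethtool's msglvl setting. A brief usage sketch, assuming bnx2x.h context; the message text and mask are arbitrary examples:

/* Usage sketch only: DP() is masked by bp->msg_enable, BNX2X_ERR() never
 * is. NETIF_MSG_LINK is just an example mask.
 */
static void example_debug_prints(struct bnx2x *bp)
{
	DP(NETIF_MSG_LINK, "link state change, flags 0x%x\n", bp->flags);
	BNX2X_ERR("unrecoverable condition, flags 0x%x\n", bp->flags);
}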
#ifdef BNX2X_STOP_ON_ERROR
@@ -130,7 +139,7 @@
offset, len32); \
} while (0)
-#define VIRT_WR_DMAE_LEN(bp, data, addr, len32) \
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
do { \
memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
bnx2x_write_big_buf_wb(bp, addr, len32); \
@@ -882,7 +891,7 @@ struct bnx2x {
/* End of fields used in the performance code paths */
int panic;
- int msglevel;
+ int msg_enable;
u32 flags;
#define PCIX_FLAG 1
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 931dcace5628..08d71bf438d6 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -471,6 +471,11 @@
/* Host coalescing constants */
+#define HC_IGU_BC_MODE 0
+#define HC_IGU_NBC_MODE 1
+
+#define HC_REGULAR_SEGMENT 0
+#define HC_DEFAULT_SEGMENT 1
/* index numbers */
#define HC_USTORM_DEF_SB_NUM_INDICES 8
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 52585338ada8..760069345b11 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1261,7 +1261,7 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 5
#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 7
+#define BCM_5710_FW_REVISION_VERSION 13
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -2433,8 +2433,10 @@ struct common_ramrod_eth_rx_cqe {
u8 ramrod_type;
#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x7F<<1)
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 1
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
u8 conn_type;
__le16 reserved1;
__le32 conn_and_cmd_data;
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x_init_ops.h
index 38b970a14fd7..2b1363a6fe78 100644
--- a/drivers/net/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -138,11 +138,16 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
{
+ const u32 *old_data = data;
+
data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
- if (bp->dmae_ready)
- VIRT_WR_DMAE_LEN(bp, data, addr, len);
- else
+ if (bp->dmae_ready) {
+ if (old_data != data)
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
+ else
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+ } else
bnx2x_init_ind_wr(bp, addr, data, len);
}
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index cf5778919b4b..32e79c359e89 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -14,6 +14,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
@@ -2987,11 +2989,8 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- printk(KERN_INFO PFX "Warning: "
- "Unqualified SFP+ module "
- "detected on %s, Port %d from %s part number %s\n"
- , bp->dev->name, params->port,
- vendor_name, vendor_pn);
+ netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
return -EINVAL;
}
@@ -4846,16 +4845,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
" has been detected on "
"port %d\n",
params->port);
- printk(KERN_ERR PFX "Error: Power"
- " fault on %s Port %d has"
- " been detected and the"
- " power to that SFP+ module"
- " has been removed to prevent"
- " failure of the card. Please"
- " remove the SFP+ module and"
- " restart the system to clear"
- " this error.\n"
- , bp->dev->name, params->port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
+ params->port);
/*
* Disable all RX_ALARMs except for
* mod_abs
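The conversions in bnx2x_link.c rely on two standard logging facilities: the pr_fmt() definition, placed before the first include so every pr_*() call in the file is prefixed with the module name, and the netdev_info()/netdev_err() helpers, which prefix messages with the driver and interface name so the "%s", bp->dev->name pair no longer has to be passed by hand. A minimal sketch of the pattern, with an illustrative function name:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    static void example_report_link(struct net_device *dev, bool up)
    {
            if (up)
                    netdev_info(dev, "NIC Link is Up\n");
            else
                    netdev_err(dev, "NIC Link is Down\n");

            pr_debug("link state changed\n");   /* prefixed via pr_fmt() */
    }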
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba13520d87..6c042a72d6cc 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-5"
-#define DRV_MODULE_RELDATE "2009/11/09"
+#define DRV_MODULE_VERSION "1.52.1-7"
+#define DRV_MODULE_RELDATE "2010/02/28"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -140,7 +140,7 @@ static struct {
};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
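DEFINE_PCI_DEVICE_TABLE() is the then-preferred way to declare a PCI ID table; it expands to a const pci_device_id array (annotated for the init constant section in kernels of this vintage), while the entries themselves stay unchanged. A hedged sketch of the usual shape, with an illustrative table name:

    static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
            { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
            { 0 }   /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);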
@@ -514,24 +514,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
mark = ((mark + 0x3) & ~0x3);
- printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
+ pr_err("begin fw dump (mark 0x%x)\n", mark);
- printk(KERN_ERR PFX);
+ pr_err("");
for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
offset + 4*word));
data[8] = 0x0;
- printk(KERN_CONT "%s", (char *)data);
+ pr_cont("%s", (char *)data);
}
for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
offset + 4*word));
data[8] = 0x0;
- printk(KERN_CONT "%s", (char *)data);
+ pr_cont("%s", (char *)data);
}
- printk(KERN_ERR PFX "end of fw dump\n");
+ pr_err("end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -893,7 +893,6 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
u16 prod;
u16 cons;
- barrier(); /* Tell compiler that prod and cons can change */
prod = fp->tx_bd_prod;
cons = fp->tx_bd_cons;
@@ -957,21 +956,34 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
fp->tx_pkt_cons = sw_cons;
fp->tx_bd_cons = bd_cons;
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ */
+ smp_mb();
+
/* TBD need a thresh? */
if (unlikely(netif_tx_queue_stopped(txq))) {
-
- /* Need to make the tx_bd_cons update visible to start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that
- * start_xmit() will miss it and cause the queue to be stopped
- * forever.
+ /* Taking tx_lock() is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in bnx2x_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
*/
- smp_mb();
+
+ __netif_tx_lock(txq, smp_processor_id());
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
(bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
}
return 0;
}
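The reworked completion path moves the smp_mb() out of the unlikely branch and takes the per-queue TX lock before re-waking the queue. The barrier pairs with the one in bnx2x_start_xmit() (see the hunk near the end of this file): the transmit side stops the queue, issues a barrier, then re-checks the ring; the completion side publishes the new consumer index, issues a barrier, then re-checks the stopped flag. A generic sketch of the protocol, with an illustrative ring_space() helper:

    /* transmit side (producer) */
    if (ring_space(ring) < MAX_SKB_FRAGS + 3) {
            netif_tx_stop_queue(txq);
            smp_mb();       /* order the stop against re-reading the ring */
            if (ring_space(ring) >= MAX_SKB_FRAGS + 3)
                    netif_tx_wake_queue(txq);
    }

    /* completion side (consumer), after advancing the consumer index */
    smp_mb();               /* publish the index before testing "stopped" */
    if (unlikely(netif_tx_queue_stopped(txq))) {
            __netif_tx_lock(txq, smp_processor_id());
            if (netif_tx_queue_stopped(txq) &&
                ring_space(ring) >= MAX_SKB_FRAGS + 3)
                    netif_tx_wake_queue(txq);
            __netif_tx_unlock(txq);
    }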
@@ -2136,7 +2148,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ netdev_err(bp->dev, "NIC Link is Down\n");
return;
}
@@ -2145,7 +2157,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
if (bp->state == BNX2X_STATE_OPEN)
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+ netdev_info(bp->dev, "NIC Link is Up, ");
line_speed = bp->link_vars.line_speed;
if (IS_E1HMF(bp)) {
@@ -2157,29 +2169,29 @@ static void bnx2x_link_report(struct bnx2x *bp)
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
}
- printk("%d Mbps ", line_speed);
+ pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
- printk("full duplex");
+ pr_cont("full duplex");
else
- printk("half duplex");
+ pr_cont("half duplex");
if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
- printk(", receive ");
+ pr_cont(", receive ");
if (bp->link_vars.flow_ctrl &
BNX2X_FLOW_CTRL_TX)
- printk("& transmit ");
+ pr_cont("& transmit ");
} else {
- printk(", transmit ");
+ pr_cont(", transmit ");
}
- printk("flow control ON");
+ pr_cont("flow control ON");
}
- printk("\n");
+ pr_cont("\n");
} else { /* link_down */
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ netdev_err(bp->dev, "NIC Link is Down\n");
}
}
@@ -2898,10 +2910,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
- " the driver to shutdown the card to prevent permanent"
- " damage. Please contact Dell Support for assistance\n",
- bp->dev->name);
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+ "Please contact Dell Support for assistance.\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4296,7 +4306,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
- if (bp->msglevel & NETIF_MSG_TIMER) {
+ if (netif_msg_timer(bp)) {
struct bnx2x_fastpath *fp0_rx = bp->fp;
struct bnx2x_fastpath *fp0_tx = bp->fp;
struct tstorm_per_client_stats *old_tclient =
@@ -4306,7 +4316,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
struct net_device_stats *nstats = &bp->dev->stats;
int i;
- printk(KERN_DEBUG "%s:\n", bp->dev->name);
+ netdev_printk(KERN_DEBUG, bp->dev, "\n");
printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
" tx pkt (%lx)\n",
bnx2x_tx_avail(fp0_tx),
@@ -4464,7 +4474,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
/* Make sure the state has been "changed" */
smp_wmb();
- if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
+ if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
}
@@ -5674,8 +5684,7 @@ gunzip_nomem2:
bp->gunzip_buf = NULL;
gunzip_nomem1:
- printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
- " un-compression\n", bp->dev->name);
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
return -ENOMEM;
}
@@ -5721,14 +5730,13 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
rc = zlib_inflate(bp->strm, Z_FINISH);
if ((rc != Z_OK) && (rc != Z_STREAM_END))
- printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
- bp->dev->name, bp->strm->msg);
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- printk(KERN_ERR PFX "%s: Firmware decompression error:"
- " gunzip_outlen (%d) not aligned\n",
- bp->dev->name, bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -6213,8 +6221,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- printk(KERN_ALERT PFX "please adjust the size of"
- " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
+ pr_alert("please adjust the size of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
val = (4 << 24) + (0 << 12) + 1024;
@@ -6938,19 +6946,21 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
}
}
-static void bnx2x_free_irq(struct bnx2x *bp)
+static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
if (bp->flags & USING_MSIX_FLAG) {
- bnx2x_free_msix_irqs(bp);
+ if (!disable_only)
+ bnx2x_free_msix_irqs(bp);
pci_disable_msix(bp->pdev);
bp->flags &= ~USING_MSIX_FLAG;
} else if (bp->flags & USING_MSI_FLAG) {
- free_irq(bp->pdev->irq, bp->dev);
+ if (!disable_only)
+ free_irq(bp->pdev->irq, bp->dev);
pci_disable_msi(bp->pdev);
bp->flags &= ~USING_MSI_FLAG;
- } else
+ } else if (!disable_only)
free_irq(bp->pdev->irq, bp->dev);
}
@@ -7018,11 +7028,10 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
- " ... fp[%d] %d\n",
- bp->dev->name, bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7443,8 +7452,10 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_set_num_queues(bp);
- if (bnx2x_alloc_mem(bp))
+ if (bnx2x_alloc_mem(bp)) {
+ bnx2x_free_irq(bp, true);
return -ENOMEM;
+ }
for_each_queue(bp, i)
bnx2x_fp(bp, i, disable_tpa) =
@@ -7459,7 +7470,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->flags & USING_MSIX_FLAG) {
rc = bnx2x_req_msix_irqs(bp);
if (rc) {
- pci_disable_msix(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
} else {
@@ -7471,14 +7482,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
- if (bp->flags & USING_MSI_FLAG)
- pci_disable_msi(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
if (bp->flags & USING_MSI_FLAG) {
bp->dev->irq = bp->pdev->irq;
- printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
- bp->dev->name, bp->pdev->irq);
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
}
}
@@ -7527,6 +7537,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
goto load_error2;
}
@@ -7593,6 +7606,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+ bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+ CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif
@@ -7662,7 +7677,7 @@ load_error3:
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
load_error1:
bnx2x_napi_disable(bp);
for_each_queue(bp, i)
@@ -7853,7 +7868,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
@@ -8295,8 +8310,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
- val, val2, val3, val4);
+ pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8907,17 +8921,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- printk(KERN_ERR PFX "FPGA detected\n");
+ pr_err("FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- printk(KERN_ERR PFX
- "MCP disabled, must load devices in order!\n");
+ pr_err("MCP disabled, must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- printk(KERN_ERR PFX
- "Multi disabled since int_mode requested is not MSI-X\n");
+ pr_err("Multi disabled since int_mode requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
@@ -9343,7 +9355,7 @@ static u32 bnx2x_get_msglevel(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- return bp->msglevel;
+ return bp->msg_enable;
}
static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
@@ -9351,7 +9363,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
struct bnx2x *bp = netdev_priv(dev);
if (capable(CAP_NET_ADMIN))
- bp->msglevel = level;
+ bp->msg_enable = level;
}
static int bnx2x_nway_reset(struct net_device *dev)
@@ -9960,12 +9972,14 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
-
+ if (!disable_tpa) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ bp->flags |= TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+ } else
+ rc = -EINVAL;
} else if (dev->features & NETIF_F_LRO) {
dev->features &= ~NETIF_F_LRO;
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -10423,7 +10437,8 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
- config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
+ /* use last unicast entries */
+ config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;
@@ -10642,7 +10657,7 @@ static const struct {
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
- (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
+ (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
@@ -11413,9 +11428,12 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
netif_tx_stop_queue(txq);
- /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
- if we put Tx into XOFF state. */
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->tx_bd_cons */
smp_mb();
+
fp->eth_q_stats.driver_xoff++;
if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
netif_tx_wake_queue(txq);
@@ -11469,7 +11487,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
@@ -11479,10 +11498,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
- for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
- i++, mclist = mclist->next) {
-
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
swab16(*(u16 *)&mclist->dmi_addr[0]);
@@ -11510,6 +11527,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
cam_entry.middle_mac_addr,
config->config_table[i].
cam_entry.lsb_mac_addr);
+ i++;
}
old = config->hdr.length;
if (old > i) {
@@ -11551,10 +11569,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
mclist->dmi_addr);
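Both multicast walks are converted from the open-coded dev->mc_list/dev->mc_count iteration to netdev_for_each_mc_addr(), with the CAM index now advanced explicitly, and netdev_mc_count() replaces the direct dev->mc_count read in the allmulti check. A minimal sketch of the idiom as it looks in this kernel, where the list entries are struct dev_mc_list and the MAC lives in dmi_addr (the limit and the programming helper are illustrative):

    struct dev_mc_list *mclist;
    int i = 0;

    if (netdev_mc_count(dev) > EXAMPLE_MAX_MCAST) {
            /* fall back to allmulti */
    } else {
            netdev_for_each_mc_addr(mclist, dev) {
                    example_program_cam(i, mclist->dmi_addr);
                    i++;
            }
    }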
@@ -11729,7 +11744,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -11753,7 +11768,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
};
@@ -11774,20 +11789,18 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
+ pr_err("Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find PCI device base address,"
- " aborting\n");
+ pr_err("Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find second PCI device"
- " base address, aborting\n");
+ pr_err("Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11795,8 +11808,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources,"
- " aborting\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11806,16 +11818,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find power management"
- " capability, aborting\n");
+ pr_err("Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PCI Express capability,"
- " aborting\n");
+ pr_err("Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11823,15 +11833,13 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
- " failed, aborting\n");
+ pr_err("pci_set_consistent_dma_mask failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- printk(KERN_ERR PFX "System does not support DMA,"
- " aborting\n");
+ pr_err("System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11844,7 +11852,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- printk(KERN_ERR PFX "Cannot map register space, aborting\n");
+ pr_err("Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11853,7 +11861,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
+ pr_err("Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11955,8 +11963,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- printk(KERN_ERR PFX "Section %d length is out of "
- "bounds\n", i);
+ pr_err("Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11968,8 +11975,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- printk(KERN_ERR PFX "Section offset %d is out of "
- "bounds\n", i);
+ pr_err("Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11981,8 +11987,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
- " Should be %d.%d.%d.%d\n",
+ pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12032,18 +12037,17 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
target[i] = be16_to_cpu(source[i]);
}
-#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
- do { \
- u32 len = be32_to_cpu(fw_hdr->arr.len); \
- bp->arr = kmalloc(len, GFP_KERNEL); \
- if (!bp->arr) { \
- printk(KERN_ERR PFX "Failed to allocate %d bytes " \
- "for "#arr"\n", len); \
- goto lbl; \
- } \
- func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
- (u8 *)bp->arr, len); \
- } while (0)
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
@@ -12056,18 +12060,17 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
else
fw_file_name = FW_FILE_NAME_E1H;
- printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
+ pr_info("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file %s\n",
- fw_file_name);
+ pr_err("Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
+ pr_err("Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
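bnx2x_init_firmware() follows the standard request_firmware() life cycle: the blob is looked up by name through the firmware loader, validated (the bnx2x_check_firmware() hunks above bound-check every section and compare the version), and must eventually be freed with release_firmware(). A generic hedged sketch of that life cycle, with an illustrative file name and validator:

    #include <linux/firmware.h>

    static int example_load_blob(struct device *dev)
    {
            const struct firmware *fw;
            int rc;

            rc = request_firmware(&fw, "example/blob.fw", dev);
            if (rc) {
                    pr_err("Can't load firmware file\n");
                    return rc;
            }

            /* fw->data and fw->size stay valid until release_firmware() */
            rc = example_validate_blob(fw->data, fw->size);

            release_firmware(fw);
            return rc;
    }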
@@ -12126,12 +12129,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- printk(KERN_ERR PFX "Cannot allocate net device\n");
+ pr_err("Cannot allocate net device\n");
return -ENOMEM;
}
bp = netdev_priv(dev);
- bp->msglevel = debug;
+ bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -12148,7 +12151,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Error loading firmware\n");
+ pr_err("Error loading firmware\n");
goto init_one_exit;
}
@@ -12159,12 +12162,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
- " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq);
- printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
return 0;
@@ -12192,7 +12194,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
@@ -12225,7 +12227,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12257,7 +12259,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12296,7 +12298,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
if (CHIP_IS_E1(bp)) {
struct mac_configuration_cmd *config =
@@ -12460,17 +12462,17 @@ static int __init bnx2x_init(void)
{
int ret;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
- printk(KERN_ERR PFX "Cannot create workqueue\n");
+ pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
- printk(KERN_ERR PFX "Cannot register driver\n");
+ pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
}
return ret;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0fb7a4964e75..822f586d72af 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replys
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave->dev->master->name);
+ best->slave ? best->slave->dev->master->name : "NULL");
}
best->is_active = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f0071cfe56b..0075514bf32f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
write_lock_bh(&bond->curr_slave_lock);
}
}
+
+ /* resend IGMP joins since all were sent on curr_active_slave */
+ if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ bond_resend_igmp_join_requests(bond);
+ }
}
/**
@@ -2615,6 +2620,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
unsigned char *arp_ptr;
__be32 sip, tip;
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ /*
+ * When using VLANs and bonding, dev and orig_dev may be
+ * incorrect if the physical interface supports VLAN
+ * acceleration. With this change ARP validation now
+ * works for hosts only reachable on the VLAN interface.
+ */
+ dev = vlan_dev_real_dev(dev);
+ orig_dev = dev_get_by_index_rcu(dev_net(skb->dev), skb->skb_iif);
+ }
+
if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
goto out;
@@ -3296,7 +3312,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
/* Create the bonding directory under /proc/net, if doesn't exist yet.
* Caller must hold rtnl_lock.
*/
-static void bond_create_proc_dir(struct bond_net *bn)
+static void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3325,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3343,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
{
}
-static void bond_create_proc_dir(struct bond_net *bn)
+static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
@@ -3639,7 +3655,7 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
/* something went wrong - fail the open operation */
- return -1;
+ return -ENOMEM;
}
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
@@ -3731,7 +3747,7 @@ static int bond_close(struct net_device *bond_dev)
static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device_stats *stats = &bond->stats;
+ struct net_device_stats *stats = &bond_dev->stats;
struct net_device_stats local_stats;
struct slave *slave;
int i;
@@ -4127,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *start_at;
int i, slave_no, res = 1;
+ struct iphdr *iph = ip_hdr(skb);
read_lock(&bond->lock);
if (!BOND_IS_OK(bond))
goto out;
-
/*
- * Concurrent TX may collide on rr_tx_counter; we accept that
- * as being rare enough not to justify using an atomic op here
+ * Start with the curr_active_slave that joined the bond as the
+ * default for sending IGMP traffic. For failover purposes one
+ * needs to maintain some consistency for the interface that will
+ * send the join/membership reports. The curr_active_slave found
+ * will send all of this type of traffic.
*/
- slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+ if ((iph->protocol == IPPROTO_IGMP) &&
+ (skb->protocol == htons(ETH_P_IP))) {
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
+ read_lock(&bond->curr_slave_lock);
+ slave = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ if (!slave)
+ goto out;
+ } else {
+ /*
+ * Concurrent TX may collide on rr_tx_counter; we accept
+ * that as being rare enough not to justify using an
+ * atomic op here.
+ */
+ slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+ bond_for_each_slave(bond, slave, i) {
+ slave_no--;
+ if (slave_no < 0)
+ break;
+ }
}
start_at = slave;
@@ -4415,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
};
+static void bond_destructor(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ if (bond->wq)
+ destroy_workqueue(bond->wq);
+ free_netdev(bond_dev);
+}
+
static void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -4435,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->ethtool_ops = &bond_ethtool_ops;
bond_set_mode_ops(bond, bond->params.mode);
- bond_dev->destructor = free_netdev;
+ bond_dev->destructor = bond_destructor;
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
@@ -4507,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
- if (bond->wq)
- destroy_workqueue(bond->wq);
-
netif_addr_lock_bh(bond_dev);
bond_mc_list_destroy(bond);
netif_addr_unlock_bh(bond_dev);
@@ -4921,8 +4961,8 @@ int bond_create(struct net *net, const char *name)
bond_setup);
if (!bond_dev) {
pr_err("%s: eek! can't alloc netdev!\n", name);
- res = -ENOMEM;
- goto out;
+ rtnl_unlock();
+ return -ENOMEM;
}
dev_net_set(bond_dev, net);
@@ -4931,20 +4971,19 @@ int bond_create(struct net *net, const char *name)
if (!name) {
res = dev_alloc_name(bond_dev, "bond%d");
if (res < 0)
- goto out_netdev;
+ goto out;
}
res = register_netdevice(bond_dev);
out:
rtnl_unlock();
+ if (res < 0)
+ bond_destructor(bond_dev);
return res;
-out_netdev:
- free_netdev(bond_dev);
- goto out;
}
-static int bond_net_init(struct net *net)
+static int __net_init bond_net_init(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
@@ -4956,7 +4995,7 @@ static int bond_net_init(struct net *net)
return 0;
}
-static void bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
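The __net_init/__net_exit annotations match how these callbacks are wired up through a struct pernet_operations, and allow them to be discarded when network namespace support is compiled out. A minimal sketch of the registration pattern the bonding driver uses (the ops and function names here are illustrative):

    static int __net_init example_net_init(struct net *net)
    {
            struct bond_net *bn = net_generic(net, bond_net_id);

            bn->net = net;
            bond_create_proc_dir(bn);
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
            struct bond_net *bn = net_generic(net, bond_net_id);

            bond_destroy_proc_dir(bn);
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
            .id   = &bond_net_id,
            .size = sizeof(struct bond_net),
    };
    /* registered at module init with register_pernet_subsys(&example_net_ops) */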
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5acd557cea9b..b8bec086daa1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -51,7 +51,9 @@
* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
-static ssize_t bonding_show_bonds(struct class *cls, char *buf)
+static ssize_t bonding_show_bonds(struct class *cls,
+ struct class_attribute *attr,
+ char *buf)
{
struct net *net = current->nsproxy->net_ns;
struct bond_net *bn = net_generic(net, bond_net_id);
@@ -98,6 +100,7 @@ static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
*/
static ssize_t bonding_store_bonds(struct class *cls,
+ struct class_attribute *attr,
const char *buffer, size_t count)
{
struct net *net = current->nsproxy->net_ns;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 558ec1352527..257a7a4dfce9 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -197,7 +197,6 @@ struct bonding {
s8 send_grat_arp;
s8 send_unsol_na;
s8 setup_by_slave;
- struct net_device_stats stats;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c0..a2f29a38798a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
mb = get_tx_next_mb(priv);
prio = get_tx_next_prio(priv);
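The CAN transmit paths in this series now start with can_dropped_invalid_skb(), which drops and accounts frames with a bad length so each driver no longer has to open-code the check (mcp251x did exactly that before this patch). The usual shape of a CAN ndo_start_xmit prologue, sketched with an illustrative driver name and a single TX mailbox:

    static netdev_tx_t example_can_start_xmit(struct sk_buff *skb,
                                              struct net_device *dev)
    {
            struct can_frame *cf = (struct can_frame *)skb->data;

            if (can_dropped_invalid_skb(dev, skb))
                    return NETDEV_TX_OK;

            netif_stop_queue(dev);  /* one mailbox in flight in this sketch */
            /* ... write cf->can_id, cf->can_dlc, cf->data[] to the chip ... */
            return NETDEV_TX_OK;
    }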
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523cc..03489864376d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -22,94 +22,12 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <asm/bfin_can.h>
#include <asm/portmux.h>
#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
-
-/*
- * transmit and receive channels
- */
-#define TRANSMIT_CHL 24
-#define RECEIVE_STD_CHL 0
-#define RECEIVE_EXT_CHL 4
-#define RECEIVE_RTR_CHL 8
-#define RECEIVE_EXT_RTR_CHL 12
-#define MAX_CHL_NUMBER 32
-
-/*
- * bfin can registers layout
- */
-struct bfin_can_mask_regs {
- u16 aml;
- u16 dummy1;
- u16 amh;
- u16 dummy2;
-};
-
-struct bfin_can_channel_regs {
- u16 data[8];
- u16 dlc;
- u16 dummy1;
- u16 tsv;
- u16 dummy2;
- u16 id0;
- u16 dummy3;
- u16 id1;
- u16 dummy4;
-};
-
-struct bfin_can_regs {
- /*
- * global control and status registers
- */
- u16 mc1; /* offset 0 */
- u16 dummy1;
- u16 md1; /* offset 4 */
- u16 rsv1[13];
- u16 mbtif1; /* offset 0x20 */
- u16 dummy2;
- u16 mbrif1; /* offset 0x24 */
- u16 dummy3;
- u16 mbim1; /* offset 0x28 */
- u16 rsv2[11];
- u16 mc2; /* offset 0x40 */
- u16 dummy4;
- u16 md2; /* offset 0x44 */
- u16 dummy5;
- u16 trs2; /* offset 0x48 */
- u16 rsv3[11];
- u16 mbtif2; /* offset 0x60 */
- u16 dummy6;
- u16 mbrif2; /* offset 0x64 */
- u16 dummy7;
- u16 mbim2; /* offset 0x68 */
- u16 rsv4[11];
- u16 clk; /* offset 0x80 */
- u16 dummy8;
- u16 timing; /* offset 0x84 */
- u16 rsv5[3];
- u16 status; /* offset 0x8c */
- u16 dummy9;
- u16 cec; /* offset 0x90 */
- u16 dummy10;
- u16 gis; /* offset 0x94 */
- u16 dummy11;
- u16 gim; /* offset 0x98 */
- u16 rsv6[3];
- u16 ctrl; /* offset 0xa0 */
- u16 dummy12;
- u16 intr; /* offset 0xa4 */
- u16 rsv7[7];
- u16 esr; /* offset 0xb4 */
- u16 rsv8[37];
-
- /*
- * channel(mailbox) mask and message registers
- */
- struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
- struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
-};
+#define TX_ECHO_SKB_MAX 1
/*
* bfin can private data
@@ -162,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
timing |= SAM;
- bfin_write16(&reg->clk, clk);
+ bfin_write16(&reg->clock, clk);
bfin_write16(&reg->timing, timing);
dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
@@ -184,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
bfin_write16(&reg->gim, 0);
/* reset can and enter configuration mode */
- bfin_write16(&reg->ctrl, SRS | CCR);
+ bfin_write16(&reg->control, SRS | CCR);
SSYNC();
- bfin_write16(&reg->ctrl, CCR);
+ bfin_write16(&reg->control, CCR);
SSYNC();
- while (!(bfin_read16(&reg->ctrl) & CCA)) {
+ while (!(bfin_read16(&reg->control) & CCA)) {
udelay(10);
if (--timeout == 0) {
dev_err(dev->dev.parent,
@@ -243,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* leave configuration mode
*/
- bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR);
+ bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
while (bfin_read16(&reg->status) & CCA) {
udelay(10);
@@ -318,6 +236,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 val;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
/* fill id */
@@ -590,7 +511,7 @@ struct net_device *alloc_bfin_candev(void)
struct net_device *dev;
struct bfin_can_priv *priv;
- dev = alloc_candev(sizeof(*priv));
+ dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
if (!dev)
return NULL;
@@ -600,6 +521,7 @@ struct net_device *alloc_bfin_candev(void)
priv->can.bittiming_const = &bfin_can_bittiming_const;
priv->can.do_set_bittiming = bfin_can_set_bittiming;
priv->can.do_set_mode = bfin_can_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
return dev;
}
@@ -721,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
if (netif_running(dev)) {
/* enter sleep mode */
- bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR);
+ bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
SSYNC();
while (!(bfin_read16(&reg->intr) & SMACK)) {
udelay(10);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322b..d0f8c7e67e7d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/can.h>
@@ -574,6 +575,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_BITTIMING_CONST]
= { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
};
static int can_changelink(struct net_device *dev,
@@ -592,6 +594,8 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
}
@@ -647,6 +651,8 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += sizeof(struct can_berr_counter);
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += sizeof(struct can_bittiming_const);
@@ -657,6 +663,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec;
enum can_state state = priv->state;
if (priv->do_get_state)
@@ -667,6 +674,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
NLA_PUT(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming);
NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
+ if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
+ NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
if (priv->bittiming_const)
NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
sizeof(*priv->bittiming_const), priv->bittiming_const);
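The new IFLA_CAN_BERR_COUNTER attribute is only emitted when the driver provides a do_get_berr_counter callback, so user space can read the TX/RX error counters over netlink. A hedged sketch of such a callback on the driver side; the private structure and the register read are illustrative:

    static int example_get_berr_counter(const struct net_device *dev,
                                        struct can_berr_counter *bec)
    {
            struct example_priv *priv = netdev_priv(dev);
            u16 cec = example_read_error_reg(priv);

            bec->txerr = cec & 0xff;
            bec->rxerr = (cec >> 8) & 0xff;
            return 0;
    }

    /* in probe: priv->can.do_get_berr_counter = example_get_berr_counter; */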
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 9c5a1537939c..b39b108318b4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -73,6 +73,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
@@ -180,6 +181,14 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
#define GET_BYTE(val, byte) \
(((val) >> ((byte) * 8)) & 0xff)
@@ -219,7 +228,8 @@ struct mcp251x_priv {
struct net_device *net;
struct spi_device *spi;
- struct mutex spi_lock; /* SPI buffer lock */
+ struct mutex mcp_lock; /* SPI device lock */
+
u8 *spi_tx_buf;
u8 *spi_rx_buf;
dma_addr_t spi_tx_dma;
@@ -227,11 +237,11 @@ struct mcp251x_priv {
struct sk_buff *tx_skb;
int tx_len;
+
struct workqueue_struct *wq;
struct work_struct tx_work;
- struct work_struct irq_work;
- struct completion awake;
- int wake;
+ struct work_struct restart_work;
+
int force_quit;
int after_suspend;
#define AFTER_SUSPEND_UP 1
@@ -245,7 +255,8 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- net->stats.tx_errors++;
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
if (priv->tx_skb)
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
@@ -300,16 +311,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
u8 val = 0;
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
mcp251x_spi_trans(spi, 3);
val = priv->spi_rx_buf[2];
- mutex_unlock(&priv->spi_lock);
-
return val;
}
@@ -317,15 +324,11 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
mcp251x_spi_trans(spi, 3);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -333,16 +336,12 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
mcp251x_spi_trans(spi, 4);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
@@ -358,10 +357,8 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
buf[i]);
} else {
- mutex_lock(&priv->spi_lock);
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
mcp251x_spi_trans(spi, TXBDAT_OFF + len);
- mutex_unlock(&priv->spi_lock);
}
}
@@ -408,13 +405,9 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-
- mutex_unlock(&priv->spi_lock);
}
}
@@ -467,21 +460,6 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
-static void mcp251x_hw_wakeup(struct spi_device *spi)
-{
- struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
-
- priv->wake = 1;
-
- /* Can only wake up by generating a wake-up interrupt. */
- mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
- mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
-
- /* Wait until the device is awake */
- if (!wait_for_completion_timeout(&priv->awake, HZ))
- dev_err(&spi->dev, "MCP251x didn't wake-up\n");
-}
-
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -490,16 +468,11 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
if (priv->tx_skb || priv->tx_len) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
- netif_stop_queue(net);
return NETDEV_TX_BUSY;
}
- if (skb->len != sizeof(struct can_frame)) {
- dev_err(&spi->dev, "dropping packet - bad length\n");
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
+ if (can_dropped_invalid_skb(net, skb))
return NETDEV_TX_OK;
- }
netif_stop_queue(net);
priv->tx_skb = skb;
@@ -515,12 +488,13 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
+ mcp251x_clean(net);
/* We have to delay work since SPI I/O may sleep */
priv->can.state = CAN_STATE_ERROR_ACTIVE;
priv->restart_tx = 1;
if (priv->can.restart_ms == 0)
priv->after_suspend = AFTER_SUSPEND_RESTART;
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
break;
default:
return -EOPNOTSUPP;
@@ -529,7 +503,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
return 0;
}
-static void mcp251x_set_normal_mode(struct spi_device *spi)
+static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
unsigned long timeout;
@@ -537,12 +511,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
- CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
- CANINTF_MERRF);
+ CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* Put device into loopback mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
} else {
/* Put device into normal mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
@@ -554,11 +530,12 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
if (time_after(jiffies, timeout)) {
dev_err(&spi->dev, "MCP251x didn't"
" enter in normal mode\n");
- return;
+ return -EBUSY;
}
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
}
static int mcp251x_do_set_bittiming(struct net_device *net)
@@ -589,33 +566,39 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
{
mcp251x_do_set_bittiming(net);
- /* Enable RX0->RX1 buffer roll over and disable filters */
- mcp251x_write_bits(spi, RXBCTRL(0),
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
- mcp251x_write_bits(spi, RXBCTRL(1),
- RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(0),
+ RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(1),
+ RXBCTRL_RXM0 | RXBCTRL_RXM1);
return 0;
}
-static void mcp251x_hw_reset(struct spi_device *spi)
+static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
int ret;
-
- mutex_lock(&priv->spi_lock);
+ unsigned long timeout;
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-
ret = spi_write(spi, priv->spi_tx_buf, 1);
-
- mutex_unlock(&priv->spi_lock);
-
- if (ret)
+ if (ret) {
dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
+ return -EIO;
+ }
+
/* Wait for reset to finish */
+ timeout = jiffies + HZ;
mdelay(10);
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
+ != CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't"
+ " enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
+ return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -639,63 +622,17 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
-static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
-{
- struct net_device *net = (struct net_device *)dev_id;
- struct mcp251x_priv *priv = netdev_priv(net);
-
- /* Schedule bottom half */
- if (!work_pending(&priv->irq_work))
- queue_work(priv->wq, &priv->irq_work);
-
- return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
+static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- ret = open_candev(net);
- if (ret) {
- dev_err(&spi->dev, "unable to set initial baudrate!\n");
- return ret;
- }
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
-
- priv->force_quit = 0;
- priv->tx_skb = NULL;
- priv->tx_len = 0;
-
- ret = request_irq(spi->irq, mcp251x_can_isr,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
- if (ret) {
- dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
-
- mcp251x_hw_wakeup(spi);
- mcp251x_hw_reset(spi);
- ret = mcp251x_setup(net, priv, spi);
- if (ret) {
- free_irq(spi->irq, net);
- mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
- mcp251x_set_normal_mode(spi);
- netif_wake_queue(net);
-
- return 0;
+ pdata->transceiver_enable(0);
+ close_candev(net);
}
static int mcp251x_stop(struct net_device *net)
@@ -706,17 +643,19 @@ static int mcp251x_stop(struct net_device *net)
close_candev(net);
+ priv->force_quit = 1;
+ free_irq(spi->irq, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->mcp_lock);
+
/* Disable and clear pending interrupts */
mcp251x_write_reg(spi, CANINTE, 0x00);
mcp251x_write_reg(spi, CANINTF, 0x00);
- priv->force_quit = 1;
- free_irq(spi->irq, net);
- flush_workqueue(priv->wq);
-
mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
+ mcp251x_clean(net);
mcp251x_hw_sleep(spi);
@@ -725,9 +664,27 @@ static int mcp251x_stop(struct net_device *net)
priv->can.state = CAN_STATE_STOPPED;
+ mutex_unlock(&priv->mcp_lock);
+
return 0;
}
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_err_skb(net, &frame);
+ if (skb) {
+ frame->can_id = can_id;
+ frame->data[1] = data1;
+ netif_rx(skb);
+ } else {
+ dev_err(&net->dev,
+ "cannot allocate error skb\n");
+ }
+}
+
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
@@ -736,33 +693,32 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
struct net_device *net = priv->net;
struct can_frame *frame;
+ mutex_lock(&priv->mcp_lock);
if (priv->tx_skb) {
- frame = (struct can_frame *)priv->tx_skb->data;
-
if (priv->can.state == CAN_STATE_BUS_OFF) {
mcp251x_clean(net);
- netif_wake_queue(net);
- return;
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+ if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ mcp251x_hw_tx(spi, frame, 0);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
}
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
- mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->can_dlc;
- can_put_echo_skb(priv->tx_skb, net, 0);
- priv->tx_skb = NULL;
}
+ mutex_unlock(&priv->mcp_lock);
}
-static void mcp251x_irq_work_handler(struct work_struct *ws)
+static void mcp251x_restart_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
- irq_work);
+ restart_work);
struct spi_device *spi = priv->spi;
struct net_device *net = priv->net;
- u8 txbnctrl;
- u8 intf;
- enum can_state new_state;
+ mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mdelay(10);
mcp251x_hw_reset(spi);
@@ -771,45 +727,54 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
netif_device_attach(net);
- /* Clean since we lost tx buffer */
- if (priv->tx_skb || priv->tx_len) {
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
+ mcp251x_clean(net);
mcp251x_set_normal_mode(spi);
+ netif_wake_queue(net);
} else {
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
+ priv->force_quit = 0;
}
- if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
- return;
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ mcp251x_write_reg(spi, TXBCTRL(0), 0);
+ mcp251x_clean(net);
+ netif_wake_queue(net);
+ mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+}
- while (!priv->force_quit && !freezing(current)) {
- u8 eflag = mcp251x_read_reg(spi, EFLG);
- int can_id = 0, data1 = 0;
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+ struct mcp251x_priv *priv = dev_id;
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
- mcp251x_write_reg(spi, EFLG, 0x00);
+ mutex_lock(&priv->mcp_lock);
+ while (!priv->force_quit) {
+ enum can_state new_state;
+ u8 intf = mcp251x_read_reg(spi, CANINTF);
+ u8 eflag;
+ int can_id = 0, data1 = 0;
- if (priv->restart_tx) {
- priv->restart_tx = 0;
- mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- can_id |= CAN_ERR_RESTARTED;
+ if (intf & CANINTF_RX0IF) {
+ mcp251x_hw_rx(spi, 0);
+ /* Free one buffer ASAP */
+ mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
+ 0x00);
}
- if (priv->wake) {
- /* Wait whilst the device wakes up */
- mdelay(10);
- priv->wake = 0;
- }
+ if (intf & CANINTF_RX1IF)
+ mcp251x_hw_rx(spi, 1);
- intf = mcp251x_read_reg(spi, CANINTF);
mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+ eflag = mcp251x_read_reg(spi, EFLG);
+ mcp251x_write_reg(spi, EFLG, 0x00);
+
/* Update can state */
if (eflag & EFLG_TXBO) {
new_state = CAN_STATE_BUS_OFF;
@@ -850,59 +815,31 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
}
priv->can.state = new_state;
- if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
- struct sk_buff *skb;
- struct can_frame *frame;
-
- /* Create error frame */
- skb = alloc_can_err_skb(net, &frame);
- if (skb) {
- /* Set error frame flags based on bus state */
- frame->can_id = can_id;
- frame->data[1] = data1;
-
- /* Update net stats for overflows */
- if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
- if (eflag & EFLG_RX0OVR)
- net->stats.rx_over_errors++;
- if (eflag & EFLG_RX1OVR)
- net->stats.rx_over_errors++;
- frame->can_id |= CAN_ERR_CRTL;
- frame->data[1] |=
- CAN_ERR_CRTL_RX_OVERFLOW;
- }
-
- netif_rx(skb);
- } else {
- dev_info(&spi->dev,
- "cannot allocate error skb\n");
+ if (intf & CANINTF_ERRIF) {
+ /* Handle overflow counters */
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+ if (eflag & EFLG_RX0OVR)
+ net->stats.rx_over_errors++;
+ if (eflag & EFLG_RX1OVR)
+ net->stats.rx_over_errors++;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
}
+ mcp251x_error_skb(net, can_id, data1);
}
if (priv->can.state == CAN_STATE_BUS_OFF) {
if (priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
can_bus_off(net);
mcp251x_hw_sleep(spi);
- return;
+ break;
}
}
if (intf == 0)
break;
- if (intf & CANINTF_WAKIF)
- complete(&priv->awake);
-
- if (intf & CANINTF_MERRF) {
- /* If there are pending Tx buffers, restart queue */
- txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
- if (!(txbnctrl & TXBCTRL_TXREQ)) {
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
- }
-
if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
@@ -913,12 +850,66 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
netif_wake_queue(net);
}
- if (intf & CANINTF_RX0IF)
- mcp251x_hw_rx(spi, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+ return IRQ_HANDLED;
+}
- if (intf & CANINTF_RX1IF)
- mcp251x_hw_rx(spi, 1);
+static int mcp251x_open(struct net_device *net)
+{
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+ struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret) {
+ dev_err(&spi->dev, "unable to set initial baudrate!\n");
+ return ret;
}
+
+ mutex_lock(&priv->mcp_lock);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(1);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+
+ ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+ IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+ close_candev(net);
+ goto open_unlock;
+ }
+
+ priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+ INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+ ret = mcp251x_hw_reset(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_setup(net, priv, spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_set_normal_mode(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ netif_wake_queue(net);
+
+open_unlock:
+ mutex_unlock(&priv->mcp_lock);
+ return ret;
}
static const struct net_device_ops mcp251x_netdev_ops = {
@@ -952,11 +943,13 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->net = net;
dev_set_drvdata(&spi->dev, priv);
priv->spi = spi;
- mutex_init(&priv->spi_lock);
+ mutex_init(&priv->mcp_lock);
/* If requested, allocate DMA buffers */
if (mcp251x_enable_dma) {
@@ -990,7 +983,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
goto error_tx_buf;
}
priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
- if (!priv->spi_tx_buf) {
+ if (!priv->spi_rx_buf) {
ret = -ENOMEM;
goto error_rx_buf;
}
@@ -1005,18 +998,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
-
- INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
- INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
-
- init_completion(&priv->awake);
-
/* Configure the SPI bus */
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
spi_setup(spi);
+ /* It is OK not to lock the MCP here; no one knows about it yet */
if (!mcp251x_hw_probe(spi)) {
dev_info(&spi->dev, "Probe failed\n");
goto error_probe;
@@ -1059,10 +1046,6 @@ static int __devexit mcp251x_can_remove(struct spi_device *spi)
unregister_candev(net);
free_candev(net);
- priv->force_quit = 1;
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
@@ -1084,6 +1067,12 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
+ priv->force_quit = 1;
+ disable_irq(spi->irq);
+ /*
+ * Note: at this point neither IST nor workqueues are running.
+ * open/stop cannot be called anyway so locking is not needed
+ */
if (netif_running(net)) {
netif_device_detach(net);
@@ -1110,16 +1099,18 @@ static int mcp251x_can_resume(struct spi_device *spi)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
pdata->power_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
if (pdata->transceiver_enable)
pdata->transceiver_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
}
}
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
return 0;
}
#else
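For readers following the mcp251x rework above: interrupt handling moves out of the driver-private workqueue into a threaded interrupt handler (request_threaded_irq() with a NULL primary handler), and the new mcp_lock mutex serializes that thread against the restart worker and the open/stop paths. A minimal sketch of the same pattern for a hypothetical SPI device follows; the foo_* names are illustrative only and not part of this patch.

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct foo_priv {
	struct mutex lock;	/* serializes the IRQ thread vs. open/stop */
	int irq;
};

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	mutex_lock(&priv->lock);
	/* Bus transactions that may sleep (e.g. SPI transfers) go here. */
	mutex_unlock(&priv->lock);
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_priv *priv)
{
	/*
	 * NULL primary handler: the IRQ core wakes foo_irq_thread() in
	 * process context, so no private workqueue is needed for the
	 * sleeping bus I/O.
	 */
	return request_threaded_irq(priv->irq, NULL, foo_irq_thread,
				    IRQF_TRIGGER_FALLING, "foo", priv);
}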
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375d..27d1d398e25e 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
config CAN_MPC5XXX
tristate "Freescale MPC5xxx onboard CAN controller"
- depends on PPC_MPC52xx
+ depends on (PPC_MPC52xx || PPC_MPC512x)
---help---
If you say yes here you get support for Freescale's MPC5xxx
- onboard CAN controller.
+ onboard CAN controller. Currently, the MPC5200, MPC5200B and
+ MPC5121 (Rev. 2 and later) are supported.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called mscan-mpc5xxx.ko.
endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b16..03e7c48465a2 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mpc52xx.h>
@@ -36,22 +37,21 @@
#define DRV_NAME "mpc5xxx_can"
-static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+struct mpc5xxx_can_data {
+ unsigned int type;
+ u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-/*
- * Get frequency of the MSCAN clock source
- *
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
- * can be selected. According to the MPC5200 user's manual, the oscillator
- * clock is the better choice as it has less jitter but due to a hardware
- * bug, it can not be selected for the old MPC5200 Rev. A chips.
- */
-
-static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
- int clock_src)
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
pvr = mfspr(SPRN_PVR);
- freq = mpc5xxx_get_bus_frequency(of->node);
+ /*
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+ * (IP_CLK) can be selected as MSCAN clock source. According to
+ * the MPC5200 user's manual, the oscillator clock is the better
+ * choice as it has less jitter. For this reason, it is selected
+ * by default. Unfortunately, it can not be selected for the old
+ * MPC5200 Rev. A chips due to a hardware bug (check errata).
+ */
+ if (clock_name && strcmp(clock_name, "ip") == 0)
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+ else
+ *mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
if (!freq)
return 0;
- if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+ if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
return freq;
/* Determine SYS_XTAL_IN frequency from the clock domain settings */
np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
if (!np_cdm) {
- dev_err(&of->dev, "can't get clock node!\n");
+ dev_err(&ofdev->dev, "can't get clock node!\n");
return 0;
}
cdm = of_iomap(np_cdm, 0);
- of_node_put(np_cdm);
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
freq *= (val & (1 << 5)) ? 8 : 4;
freq /= (val & (1 << 6)) ? 12 : 16;
+ of_node_put(np_cdm);
iounmap(cdm);
return freq;
}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+ u32 spmr; /* System PLL Mode Reg */
+ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
+ u32 scfr1; /* System Clk Freq Reg 1 */
+ u32 scfr2; /* System Clk Freq Reg 2 */
+ u32 reserved;
+ u32 bcr; /* Bread Crumb Reg */
+ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
+ u32 spccr; /* SPDIF Clk Ctrl Reg */
+ u32 cccr; /* CFM Clk Ctrl Reg */
+ u32 dccr; /* DIU Clk Cnfg Reg */
+ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+ { .compatible = "fsl,mpc5121-clock", },
+ {}
+};
+
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ struct mpc512x_clockctl __iomem *clockctl;
+ struct device_node *np_clock;
+ struct clk *sys_clk, *ref_clk;
+ int plen, clockidx, clocksrc = -1;
+ u32 sys_freq, val, clockdiv = 1, freq = 0;
+ const u32 *pval;
+
+ np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+ if (!np_clock) {
+ dev_err(&ofdev->dev, "couldn't find clock node\n");
+ return -ENODEV;
+ }
+ clockctl = of_iomap(np_clock, 0);
+ if (!clockctl) {
+ dev_err(&ofdev->dev, "couldn't map clock registers\n");
+ return 0;
+ }
+
+ /* Determine the MSCAN device index from the physical address */
+ pval = of_get_property(ofdev->node, "reg", &plen);
+ BUG_ON(!pval || plen < sizeof(*pval));
+ clockidx = (*pval & 0x80) ? 1 : 0;
+ if (*pval & 0x2000)
+ clockidx += 2;
+
+ /*
+ * Clock source and divider selection: 3 different clock sources
+ * can be selected: "ip", "ref" or "sys". For the latter two, a
+ * clock divider can be defined as well. If the clock source is
+ * not specified by the device tree, we first try to find an
+ * optimal CAN source clock based on the system clock. If that
+ * is not possible, the reference clock will be used.
+ */
+ if (clock_name && !strcmp(clock_name, "ip")) {
+ *mscan_clksrc = MSCAN_CLKSRC_IPS;
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
+ } else {
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+ pval = of_get_property(ofdev->node,
+ "fsl,mscan-clock-divider", &plen);
+ if (pval && plen == sizeof(*pval))
+ clockdiv = *pval;
+ if (!clockdiv)
+ clockdiv = 1;
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ if (!sys_clk) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+ /* Get and round up/down sys clock rate */
+ sys_freq = 1000000 *
+ ((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+ if (!clock_name) {
+ /* A multiple of 16 MHz would be optimal */
+ if ((sys_freq % 16000000) == 0) {
+ clocksrc = 0;
+ clockdiv = sys_freq / 16000000;
+ freq = sys_freq / clockdiv;
+ }
+ } else {
+ clocksrc = 0;
+ freq = sys_freq / clockdiv;
+ }
+ }
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ if (!ref_clk) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+ clocksrc = 1;
+ freq = clk_get_rate(ref_clk) / clockdiv;
+ }
+ }
+
+ /* Disable clock */
+ out_be32(&clockctl->mccr[clockidx], 0x0);
+ if (clocksrc >= 0) {
+ /* Set source and divider */
+ val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+ out_be32(&clockctl->mccr[clockidx], val);
+ /* Enable clock */
+ out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+ }
+
+ /* Enable MSCAN clock domain */
+ val = in_be32(&clockctl->sccr[1]);
+ if (!(val & (1 << 25)))
+ out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+ dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+ *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+ clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+ of_node_put(np_clock);
+ iounmap(clockctl);
+
+ return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
struct device_node *np = ofdev->node;
struct net_device *dev;
struct mscan_priv *priv;
void __iomem *base;
- const char *clk_src;
- int err, irq, clock_src;
+ const char *clock_name = NULL;
+ int irq, mscan_clksrc = 0;
+ int err = -ENOMEM;
- base = of_iomap(ofdev->node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
- err = -ENOMEM;
- goto exit_release_mem;
+ return err;
}
irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
}
dev = alloc_mscandev();
- if (!dev) {
- err = -ENOMEM;
+ if (!dev)
goto exit_dispose_irq;
- }
priv = netdev_priv(dev);
priv->reg_base = base;
dev->irq = irq;
- /*
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
- * (IP_CLK) can be selected as MSCAN clock source. According to
- * the MPC5200 user's manual, the oscillator clock is the better
- * choice as it has less jitter. For this reason, it is selected
- * by default.
- */
- clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
- if (clk_src && strcmp(clk_src, "ip") == 0)
- clock_src = MSCAN_CLKSRC_BUS;
- else
- clock_src = MSCAN_CLKSRC_XTAL;
- priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+ clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+ BUG_ON(!data);
+ priv->type = data->type;
+ priv->can.clock.freq = data->get_clock(ofdev, clock_name,
+ &mscan_clksrc);
if (!priv->can.clock.freq) {
- dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
- err = -ENODEV;
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
}
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = register_mscandev(dev, clock_src);
+ err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
irq_dispose_mapping(irq);
exit_unmap_mem:
iounmap(base);
-exit_release_mem:
+
return err;
}
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
}
#endif
+static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+ .type = MSCAN_TYPE_MPC5200,
+ .get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+ .type = MSCAN_TYPE_MPC5121,
+ .get_clock = mpc512x_can_get_clock,
+};
+
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
- {.compatible = "fsl,mpc5200-mscan"},
+ { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+ /* Note that only MPC5121 Rev. 2 (and later) is supported */
+ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
module_exit(mpc5xxx_can_exit);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca6..6b7dd578d417 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
priv->shadow_canrier = 0;
priv->flags = 0;
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ /* Clear pending bus-off condition */
+ if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ }
+
err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
if (err)
return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
out_8(&regs->cantier, 0);
/* Enable receive interrupts. */
- out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
- MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+
+ return 0;
+}
+
+static int mscan_restart(struct net_device *dev)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
+ "bus-off state expected");
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ /* Re-enable receive interrupts. */
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+ } else {
+ if (priv->can.state <= CAN_STATE_BUS_OFF)
+ mscan_set_mode(dev, MSCAN_INIT_MODE);
+ return mscan_start(dev);
+ }
return 0;
}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (frame->can_dlc > 8)
- return -EINVAL;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
* automatically. To avoid that we stop the chip doing
* a light-weight stop (we are in irq-context).
*/
- out_8(&regs->cantier, 0);
- out_8(&regs->canrier, 0);
- setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+ if (priv->type != MSCAN_TYPE_MPC5121) {
+ out_8(&regs->cantier, 0);
+ out_8(&regs->canrier, 0);
+ setbits8(&regs->canctl0,
+ MSCAN_SLPRQ | MSCAN_INITRQ);
+ }
can_bus_off(dev);
break;
default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
- if (priv->can.state <= CAN_STATE_BUS_OFF)
- mscan_set_mode(dev, MSCAN_INIT_MODE);
- ret = mscan_start(dev);
+ ret = mscan_restart(dev);
if (ret)
break;
if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_start_xmit = mscan_start_xmit,
};
-int register_mscandev(struct net_device *dev, int clock_src)
+int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
- if (clock_src)
+ if (mscan_clksrc)
ctl1 |= MSCAN_CLKSRC;
else
ctl1 &= ~MSCAN_CLKSRC;
+ if (priv->type == MSCAN_TYPE_MPC5121)
+ ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed8..4ff966473bc9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
#define MSCAN_CLKSRC 0x40
#define MSCAN_LOOPB 0x20
#define MSCAN_LISTEN 0x10
+#define MSCAN_BORM 0x08
#define MSCAN_WUPM 0x04
#define MSCAN_SLPAK 0x02
#define MSCAN_INITAK 0x01
-/* Use the MPC5200 MSCAN variant? */
+/* Use the MPC5XXX MSCAN variant? */
#ifdef CONFIG_PPC
-#define MSCAN_FOR_MPC5200
+#define MSCAN_FOR_MPC5XXX
#endif
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define MSCAN_CLKSRC_BUS 0
#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
#else
#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
#define MSCAN_EFF_RTR_SHIFT 0
#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
#define _MSCAN_RESERVED_DSR_SIZE 2
#else
@@ -165,67 +167,66 @@ struct mscan_regs {
u8 cantbsel; /* + 0x14 0x0a */
u8 canidac; /* + 0x15 0x0b */
u8 reserved; /* + 0x16 0x0c */
- _MSCAN_RESERVED_(6, 5); /* + 0x17 */
-#ifndef MSCAN_FOR_MPC5200
- u8 canmisc; /* 0x0d */
-#endif
+ _MSCAN_RESERVED_(6, 2); /* + 0x17 */
+ u8 canmisc; /* + 0x19 0x0d */
+ _MSCAN_RESERVED_(7, 2); /* + 0x1a */
u8 canrxerr; /* + 0x1c 0x0e */
u8 cantxerr; /* + 0x1d 0x0f */
- _MSCAN_RESERVED_(7, 2); /* + 0x1e */
+ _MSCAN_RESERVED_(8, 2); /* + 0x1e */
u16 canidar1_0; /* + 0x20 0x10 */
- _MSCAN_RESERVED_(8, 2); /* + 0x22 */
+ _MSCAN_RESERVED_(9, 2); /* + 0x22 */
u16 canidar3_2; /* + 0x24 0x12 */
- _MSCAN_RESERVED_(9, 2); /* + 0x26 */
+ _MSCAN_RESERVED_(10, 2); /* + 0x26 */
u16 canidmr1_0; /* + 0x28 0x14 */
- _MSCAN_RESERVED_(10, 2); /* + 0x2a */
+ _MSCAN_RESERVED_(11, 2); /* + 0x2a */
u16 canidmr3_2; /* + 0x2c 0x16 */
- _MSCAN_RESERVED_(11, 2); /* + 0x2e */
+ _MSCAN_RESERVED_(12, 2); /* + 0x2e */
u16 canidar5_4; /* + 0x30 0x18 */
- _MSCAN_RESERVED_(12, 2); /* + 0x32 */
+ _MSCAN_RESERVED_(13, 2); /* + 0x32 */
u16 canidar7_6; /* + 0x34 0x1a */
- _MSCAN_RESERVED_(13, 2); /* + 0x36 */
+ _MSCAN_RESERVED_(14, 2); /* + 0x36 */
u16 canidmr5_4; /* + 0x38 0x1c */
- _MSCAN_RESERVED_(14, 2); /* + 0x3a */
+ _MSCAN_RESERVED_(15, 2); /* + 0x3a */
u16 canidmr7_6; /* + 0x3c 0x1e */
- _MSCAN_RESERVED_(15, 2); /* + 0x3e */
+ _MSCAN_RESERVED_(16, 2); /* + 0x3e */
struct {
u16 idr1_0; /* + 0x40 0x20 */
- _MSCAN_RESERVED_(16, 2); /* + 0x42 */
+ _MSCAN_RESERVED_(17, 2); /* + 0x42 */
u16 idr3_2; /* + 0x44 0x22 */
- _MSCAN_RESERVED_(17, 2); /* + 0x46 */
+ _MSCAN_RESERVED_(18, 2); /* + 0x46 */
u16 dsr1_0; /* + 0x48 0x24 */
- _MSCAN_RESERVED_(18, 2); /* + 0x4a */
+ _MSCAN_RESERVED_(19, 2); /* + 0x4a */
u16 dsr3_2; /* + 0x4c 0x26 */
- _MSCAN_RESERVED_(19, 2); /* + 0x4e */
+ _MSCAN_RESERVED_(20, 2); /* + 0x4e */
u16 dsr5_4; /* + 0x50 0x28 */
- _MSCAN_RESERVED_(20, 2); /* + 0x52 */
+ _MSCAN_RESERVED_(21, 2); /* + 0x52 */
u16 dsr7_6; /* + 0x54 0x2a */
- _MSCAN_RESERVED_(21, 2); /* + 0x56 */
+ _MSCAN_RESERVED_(22, 2); /* + 0x56 */
u8 dlr; /* + 0x58 0x2c */
- u8:8; /* + 0x59 0x2d */
- _MSCAN_RESERVED_(22, 2); /* + 0x5a */
+ u8 reserved; /* + 0x59 0x2d */
+ _MSCAN_RESERVED_(23, 2); /* + 0x5a */
u16 time; /* + 0x5c 0x2e */
} rx;
- _MSCAN_RESERVED_(23, 2); /* + 0x5e */
+ _MSCAN_RESERVED_(24, 2); /* + 0x5e */
struct {
u16 idr1_0; /* + 0x60 0x30 */
- _MSCAN_RESERVED_(24, 2); /* + 0x62 */
+ _MSCAN_RESERVED_(25, 2); /* + 0x62 */
u16 idr3_2; /* + 0x64 0x32 */
- _MSCAN_RESERVED_(25, 2); /* + 0x66 */
+ _MSCAN_RESERVED_(26, 2); /* + 0x66 */
u16 dsr1_0; /* + 0x68 0x34 */
- _MSCAN_RESERVED_(26, 2); /* + 0x6a */
+ _MSCAN_RESERVED_(27, 2); /* + 0x6a */
u16 dsr3_2; /* + 0x6c 0x36 */
- _MSCAN_RESERVED_(27, 2); /* + 0x6e */
+ _MSCAN_RESERVED_(28, 2); /* + 0x6e */
u16 dsr5_4; /* + 0x70 0x38 */
- _MSCAN_RESERVED_(28, 2); /* + 0x72 */
+ _MSCAN_RESERVED_(29, 2); /* + 0x72 */
u16 dsr7_6; /* + 0x74 0x3a */
- _MSCAN_RESERVED_(29, 2); /* + 0x76 */
+ _MSCAN_RESERVED_(30, 2); /* + 0x76 */
u8 dlr; /* + 0x78 0x3c */
u8 tbpr; /* + 0x79 0x3d */
- _MSCAN_RESERVED_(30, 2); /* + 0x7a */
+ _MSCAN_RESERVED_(31, 2); /* + 0x7a */
u16 time; /* + 0x7c 0x3e */
} tx;
- _MSCAN_RESERVED_(31, 2); /* + 0x7e */
+ _MSCAN_RESERVED_(32, 2); /* + 0x7e */
} __attribute__ ((packed));
#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
#define MSCAN_SET_MODE_RETRIES 255
#define MSCAN_ECHO_SKB_MAX 3
+#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
+ MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
+ MSCAN_TSTATE1 | MSCAN_TSTATE0)
+
+/* MSCAN type variants */
+enum {
+ MSCAN_TYPE_MPC5200,
+ MSCAN_TYPE_MPC5121
+};
#define BTR0_BRP_MASK 0x3f
#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
+ unsigned int type; /* MSCAN type variants */
long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
};
extern struct net_device *alloc_mscandev(void);
-/*
- * clock_src:
- * 1 = The MSCAN clock source is the onchip Bus Clock.
- * 0 = The MSCAN clock source is the chip Oscillator Clock.
- */
-extern int register_mscandev(struct net_device *dev, int clock_src);
+extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
extern void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
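The renumbering of the _MSCAN_RESERVED_ fields in the register map above has one purpose: the former five-byte reserved gap is split so that canmisc sits at its hardware offset (+0x19) on the MPC5xxx layout as well, making the MPC5121 bus-off hold bit (MSCAN_BOHOLD) reachable. Expanded by hand, the affected slice of the MPC5xxx layout looks roughly like this (offsets copied from the comments above; fragment for illustration only):

#include <linux/types.h>

struct mscan_regs_fragment {
	u8 reserved;	/* + 0x16                          */
	u8 _res6[2];	/* + 0x17, _MSCAN_RESERVED_(6, 2)  */
	u8 canmisc;	/* + 0x19, holds MSCAN_BOHOLD      */
	u8 _res7[2];	/* + 0x1a, _MSCAN_RESERVED_(7, 2)  */
	u8 canrxerr;	/* + 0x1c                          */
	u8 cantxerr;	/* + 0x1d                          */
} __attribute__ ((packed));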
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f247..9e277d64a318 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
This driver is for the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_PLX_PCI
+ tristate "PLX90xx PCI-bridge based Cards"
+ depends on PCI
+ ---help---
+ This driver is for CAN interface cards based on
+ the PLX90xx PCI bridge.
+ The driver currently supports:
+ - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+ - Adlink PCI-7841/cPCI-7841 SE card
+ - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+ - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac03965..ce924553995d 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..5f53da0bc40c 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -102,7 +103,7 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-static struct pci_device_id ems_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
#define KVASER_PCI_DEVICE_ID2 0x0008
-static struct pci_device_id kvaser_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
{KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
{KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
{ 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 000000000000..4aff4070db96
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_plx_pci"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
+ "the SJA1000 chips");
+MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
+ "Adlink PCI-7841/cPCI-7841 SE, "
+ "Marathon CAN-bus-PCI, "
+ "TEWS TECHNOLOGIES TPMC810");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+ int channels; /* detected channels count */
+ struct net_device *net_dev[PLX_PCI_MAX_CHAN];
+ void __iomem *conf_addr;
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX90xx registers */
+#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
+#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
+ * Serial EEPROM, and Initialization
+ * Control register
+ */
+
+#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
+#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
+#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first chip's CLKOUT output.
+ */
+#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define REG_CR 0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode */
+#define REG_CR_BASICCAN_INITIAL 0x21
+#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
+#define REG_SR_BASICCAN_INITIAL 0x0c
+#define REG_IR_BASICCAN_INITIAL 0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode */
+#define REG_MOD_PELICAN_INITIAL 0x01
+#define REG_SR_PELICAN_INITIAL 0x3c
+#define REG_IR_PELICAN_INITIAL 0x00
+
+#define ADLINK_PCI_VENDOR_ID 0x144A
+#define ADLINK_PCI_DEVICE_ID 0x7841
+
+#define MARATHON_PCI_DEVICE_ID 0x2715
+
+#define TEWS_PCI_VENDOR_ID 0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+ u32 bar;
+ u32 offset;
+ u32 size; /* 0x00 - auto, i.e. the length of the entire BAR */
+};
+
+struct plx_pci_card_info {
+ const char *name;
+ int channel_count;
+ u32 can_clock;
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+
+ /* Parameters for mapping local configuration space */
+ struct plx_pci_channel_map conf_map;
+
+ /* Parameters for mapping the SJA1000 chips */
+ struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841 SE", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
+ "Marathon CAN-bus-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+ &plx_pci_reset_marathon
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
+ "TEWS TECHNOLOGIES TPMC810", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
+ {
+ /* Adlink PCI-7841/cPCI-7841 */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink
+ },
+ {
+ /* Adlink PCI-7841/cPCI-7841 SE */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink_se
+ },
+ {
+ /* Marathon CAN-bus-PCI card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon
+ },
+ {
+ /* TEWS TECHNOLOGIES TPMC810 card */
+ TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_tews
+ },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
+static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
+{
+ iowrite8(val, priv->reg_base + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it from BasicCAN mode into PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+{
+ int flag = 0;
+
+ /*
+ * Check registers after hardware reset (the Basic mode)
+ * See states on p. 10 of the Datasheet.
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+ (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+ /* Bring the SJA1000 into the PeliCAN mode */
+ priv->write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /*
+ * Check registers after reset in the PeliCAN mode.
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+ return 0;
+}
+
+/*
+ * PLX90xx software reset
+ * This also asserts LRESET#, resetting the devices on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+};
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+ void __iomem *reset_addr;
+ int i;
+ int reset_bar[2] = {3, 5};
+
+ plx_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+ if (!reset_addr) {
+ dev_err(&pdev->dev, "Failed to remap reset "
+ "space %d (BAR%d)\n", i, reset_bar[i]);
+ } else {
+ /* reset the SJA1000 chip */
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, reset_addr);
+ }
+ }
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ int i = 0;
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "Removing %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ priv = netdev_priv(dev);
+ if (priv->reg_base)
+ pci_iounmap(pdev, priv->reg_base);
+ free_sja1000dev(dev);
+ }
+
+ plx_pci_reset_common(pdev);
+
+ /*
+ * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
+ * Local_2 interrupts
+ */
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+
+ if (card->conf_addr)
+ pci_iounmap(pdev, card->conf_addr);
+
+ kfree(card);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel with the Socket-CAN subsystem.
+ */
+static int __devinit plx_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct plx_pci_card *card;
+ struct plx_pci_card_info *ci;
+ int err, i;
+ u32 val;
+ void __iomem *addr;
+
+ ci = (struct plx_pci_card_info *)ent->driver_data;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+ ci->name, PCI_SLOT(pdev->devfn));
+
+ /* Allocate card structures to hold addresses, ... */
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ card->channels = 0;
+
+ /* Remap PLX90xx configuration space */
+ addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap configuration space "
+ "(BAR%d)\n", ci->conf_map.bar);
+ goto failure_cleanup;
+ }
+ card->conf_addr = addr + ci->conf_map.offset;
+
+ ci->reset_func(pdev);
+
+ /* Detect available channels */
+ for (i = 0; i < ci->channel_count; i++) {
+ struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+
+ dev->irq = pdev->irq;
+
+ /*
+ * Remap the I/O space of the SJA1000 chips.
+ * This mapping is device-dependent.
+ */
+ addr = pci_iomap(pdev, cm->bar, cm->size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+ goto failure_cleanup;
+ }
+
+ priv->reg_base = addr + cm->offset;
+ priv->read_reg = plx_pci_read_reg;
+ priv->write_reg = plx_pci_write_reg;
+
+ /* Check if channel is present */
+ if (plx_pci_check_sja1000(priv)) {
+ priv->can.clock.freq = ci->can_clock;
+ priv->ocr = ci->ocr;
+ priv->cdr = ci->cdr;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed "
+ "(err=%d)\n", err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+ "registered as %s\n", i + 1, priv->reg_base,
+ dev->irq, dev->name);
+ } else {
+ dev_err(&pdev->dev, "Channel #%d not detected\n",
+ i + 1);
+ free_sja1000dev(dev);
+ }
+ }
+
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ /*
+ * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+ * Local_2 interrupts from the SJA1000 chips
+ */
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+
+ return 0;
+
+failure_cleanup:
+ dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+ plx_pci_del_card(pdev);
+
+ return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = plx_pci_tbl,
+ .probe = plx_pci_add_card,
+ .remove = plx_pci_del_card,
+};
+
+static int __init plx_pci_init(void)
+{
+ return pci_register_driver(&plx_pci_driver);
+}
+
+static void __exit plx_pci_exit(void)
+{
+ pci_unregister_driver(&plx_pci_driver);
+}
+
+module_init(plx_pci_init);
+module_exit(plx_pci_exit);
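Because plx_pci_card_info uses positional initializers above, the field order is easy to misread. A hypothetical descriptor for one more PLX90xx board, annotated field by field (the EXAMPLE_* IDs and the whole entry are placeholders, not a supported device):

static struct plx_pci_card_info plx_pci_card_info_example __devinitdata = {
	"Example single-channel card",	/* .name */
	1,				/* .channel_count */
	PLX_PCI_CAN_CLOCK,		/* .can_clock */
	PLX_PCI_OCR,			/* .ocr */
	PLX_PCI_CDR,			/* .cdr */
	{0, 0x00, 0x00},		/* .conf_map: BAR0, offset 0, whole BAR */
	{ {2, 0x00, 0x80} },		/* .chan_map_tbl[0]: BAR2, offset 0, 0x80 bytes */
	&plx_pci_reset_common		/* .reset_func */
};

/*
 * The matching ID table entry would carry the descriptor in driver_data:
 *	{ EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
 *	  0, 0, (kernel_ulong_t)&plx_pci_card_info_example },
 */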
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b4..145b1a731a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -130,8 +130,12 @@ static void set_normal_mode(struct net_device *dev)
/* check reset bit */
if ((status & MOD_RM) == 0) {
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* enable all interrupts */
- priv->write_reg(priv, REG_IER, IRQ_ALL);
+ /* enable interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ priv->write_reg(priv, REG_IER, IRQ_ALL);
+ else
+ priv->write_reg(priv, REG_IER,
+ IRQ_ALL & ~IRQ_BEI);
return;
}
@@ -203,6 +207,17 @@ static int sja1000_set_bittiming(struct net_device *dev)
return 0;
}
+static int sja1000_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ bec->txerr = priv->read_reg(priv, REG_TXERR);
+ bec->rxerr = priv->read_reg(priv, REG_RXERR);
+
+ return 0;
+}
+
/*
* initialize SJA1000 chip:
* - reset chip
@@ -249,6 +264,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
uint8_t dreg;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
fi = dlc = cf->can_dlc;
@@ -434,6 +452,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
priv->can.state = state;
@@ -564,6 +584,9 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.bittiming_const = &sja1000_bittiming_const;
priv->can.do_set_bittiming = sja1000_set_bittiming;
priv->can.do_set_mode = sja1000_set_mode;
+ priv->can.do_get_berr_counter = sja1000_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_BERR_REPORTING;
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da528..0c3d2ba0d178 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -28,9 +28,11 @@
* .mbx_offset = 0x2000,
* .int_line = 0,
* .revision = 1,
+ * .transceiver_switch = hecc_phy_control,
* };
*
- * Please see include/can/platform/ti_hecc.h for description of above fields
+ * Please see include/linux/can/platform/ti_hecc.h for description of
+ * above fields.
*
*/
@@ -220,6 +222,7 @@ struct ti_hecc_priv {
u32 tx_head;
u32 tx_tail;
u32 rx_next;
+ void (*transceiver_switch)(int);
};
static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
@@ -317,6 +320,13 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
return 0;
}
+static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
+ int on)
+{
+ if (priv->transceiver_switch)
+ priv->transceiver_switch(on);
+}
+
static void ti_hecc_reset(struct net_device *ndev)
{
u32 cnt;
@@ -477,6 +487,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
mbxno = get_tx_head_mb(priv);
mbx_mask = BIT(mbxno);
spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +504,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = min_t(u8, cf->can_dlc, 8);
+ data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
- data |= get_tx_head_prio(priv) << 8;
@@ -816,15 +828,17 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
+ ti_hecc_transceiver_switch(priv, 1);
+
/* Open common can device */
err = open_candev(ndev);
if (err) {
dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
+ ti_hecc_transceiver_switch(priv, 0);
free_irq(ndev->irq, ndev);
return err;
}
- clk_enable(priv->clk);
ti_hecc_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -840,8 +854,8 @@ static int ti_hecc_close(struct net_device *ndev)
napi_disable(&priv->napi);
ti_hecc_stop(ndev);
free_irq(ndev->irq, ndev);
- clk_disable(priv->clk);
close_candev(ndev);
+ ti_hecc_transceiver_switch(priv, 0);
return 0;
}
@@ -903,10 +917,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->hecc_ram_offset = pdata->hecc_ram_offset;
priv->mbx_offset = pdata->mbx_offset;
priv->int_line = pdata->int_line;
+ priv->transceiver_switch = pdata->transceiver_switch;
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
priv->can.do_get_state = ti_hecc_get_state;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
@@ -925,6 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
+ clk_enable(priv->clk);
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -953,6 +970,7 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(ndev);
+ clk_disable(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -964,6 +982,48 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
return 0;
}
+
+#ifdef CONFIG_PM
+static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int ti_hecc_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
+
+ clk_enable(priv->clk);
+
+ hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+
+ return 0;
+}
+#else
+#define ti_hecc_suspend NULL
+#define ti_hecc_resume NULL
+#endif
+
/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
.driver = {
@@ -972,6 +1032,8 @@ static struct platform_driver ti_hecc_driver = {
},
.probe = ti_hecc_probe,
.remove = __devexit_p(ti_hecc_remove),
+ .suspend = ti_hecc_suspend,
+ .resume = ti_hecc_resume,
};
static int __init ti_hecc_init_driver(void)
@@ -979,14 +1041,15 @@ static int __init ti_hecc_init_driver(void)
printk(KERN_INFO DRV_DESC "\n");
return platform_driver_register(&ti_hecc_driver);
}
-module_init(ti_hecc_init_driver);
static void __exit ti_hecc_exit_driver(void)
{
printk(KERN_INFO DRV_DESC " unloaded\n");
platform_driver_unregister(&ti_hecc_driver);
}
+
module_exit(ti_hecc_exit_driver);
+module_init(ti_hecc_init_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
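The transceiver_switch hook wired up above is supplied by board code through ti_hecc_platform_data, as the hecc_phy_control reference in the header comment suggests. A sketch of such board code, assuming the transceiver's standby pin is driven by a GPIO (the GPIO number, the values and the omitted offset fields are illustrative only):

#include <linux/gpio.h>
#include <linux/can/platform/ti_hecc.h>

#define BOARD_CAN_STB_GPIO	42	/* hypothetical standby-control GPIO */

static void hecc_phy_control(int on)
{
	/* 1 = take the CAN transceiver out of standby, 0 = put it back */
	gpio_set_value(BOARD_CAN_STB_GPIO, on ? 1 : 0);
}

static struct ti_hecc_platform_data board_hecc_pdata = {
	/* offset fields as in the example at the top of ti_hecc.c */
	.mbx_offset		= 0x2000,
	.int_line		= 0,
	.revision		= 1,
	.transceiver_switch	= hecc_phy_control,
};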
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a15..97ff6febad63 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
- from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+ from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf4..d800b598ae3d 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -873,9 +876,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
return NETDEV_TX_OK;
nomem:
- if (skb)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
@@ -1005,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf,
netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
if (!netdev) {
- dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
+ dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
return -ENOMEM;
}
@@ -1019,8 +1020,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.bittiming_const = &ems_usb_bittiming_const;
dev->can.do_set_bittiming = ems_usb_set_bittiming;
dev->can.do_set_mode = ems_usb_set_mode;
-
- netdev->flags |= IFF_ECHO; /* we support local echo */
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->intr_urb) {
- dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
+ dev_err(&intf->dev, "Couldn't alloc intr URB\n");
goto cleanup_candev;
}
dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
if (!dev->intr_in_buffer) {
- dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
+ dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
goto cleanup_intr_urb;
}
dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
sizeof(struct ems_cpc_msg), GFP_KERNEL);
if (!dev->tx_msg_buffer) {
- dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
+ dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
goto cleanup_intr_in_buffer;
}
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..a30b8f480f61 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,8 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/slab.h>
#include <net/rtnetlink.h>
static __initdata const char banner[] =
@@ -70,10 +72,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +88,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += cf->can_dlc;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +110,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
}
kfree_skb(skb);
return NETDEV_TX_OK;
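The byte-counter change in vcan above is subtle: the data of a CAN skb is a struct can_frame, so skb->len is the size of that structure rather than the number of payload bytes a real bus would carry, while cf->can_dlc is the payload length. A tiny userspace illustration (the 16-byte size assumes the usual struct can_frame layout):

#include <stdio.h>
#include <linux/can.h>

int main(void)
{
	struct can_frame cf = { .can_id = 0x123, .can_dlc = 3 };

	/* Typically prints: frame size 16, payload 3 */
	printf("frame size %zu, payload %d\n", sizeof(cf), cf.can_dlc);
	return 0;
}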
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e488..9bd155e4111c 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -66,6 +66,7 @@
* by default, the selective clear mask is set up to process rx packets.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -106,7 +107,7 @@
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
@@ -143,7 +144,6 @@
#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME "cassini"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.6"
#define DRV_MODULE_RELDATE "21 May 2008"
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
@@ -649,9 +649,8 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
cas_spare_recover(cp, GFP_ATOMIC);
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
- if (netif_msg_rx_err(cp))
- printk(KERN_ERR "%s: no spare buffers "
- "available.\n", cp->dev->name);
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
spin_unlock(&cp->rx_spare_lock);
return NULL;
}
@@ -728,12 +727,10 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
#endif
start_aneg:
if (cp->lstate == link_up) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "PCS link down\n");
} else {
if (changed) {
- printk(KERN_INFO "%s: link configuration changed\n",
- cp->dev->name);
+ netdev_info(cp->dev, "link configuration changed\n");
}
}
cp->lstate = link_down;
@@ -826,12 +823,12 @@ static int cas_saturn_firmware_init(struct cas *cp)
err = request_firmware(&fw, fw_name, &cp->pdev->dev);
if (err) {
- printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
+ pr_err("Failed to load firmware \"%s\"\n",
fw_name);
return err;
}
if (fw->size < 2) {
- printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
+ pr_err("bogus length %zu in \"%s\"\n",
fw->size, fw_name);
err = -EINVAL;
goto out;
@@ -841,7 +838,7 @@ static int cas_saturn_firmware_init(struct cas *cp)
cp->fw_data = vmalloc(cp->fw_size);
if (!cp->fw_data) {
err = -ENOMEM;
- printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
+ pr_err("\"%s\" Failed %d\n", fw_name, err);
goto out;
}
memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -986,9 +983,8 @@ static void cas_phy_init(struct cas *cp)
break;
}
if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not "
- "clear [%08x].\n", cp->dev->name,
- readl(cp->regs + REG_PCS_STATE_MACHINE));
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
/* Make sure PCS is disabled while changing advertisement
* configuration.
@@ -1030,11 +1026,8 @@ static int cas_pcs_link_check(struct cas *cp)
*/
if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
PCS_MII_STATUS_REMOTE_FAULT)) ==
- (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: PCS RemoteFault\n",
- cp->dev->name);
- }
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
/* work around link detection issue by querying the PCS state
* machine directly.
@@ -1081,10 +1074,8 @@ static int cas_pcs_link_check(struct cas *cp)
cp->link_transition = LINK_TRANSITION_ON_FAILURE;
}
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp)) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
- }
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
/* Cassini only: if you force a mode, there can be
* sync problems on link down. to fix that, the following
@@ -1139,9 +1130,8 @@ static int cas_txmac_interrupt(struct net_device *dev,
if (!txmac_stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
- cp->dev->name, txmac_stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
/* Defer timer expiration is quite normal,
* don't even log the event.
@@ -1152,14 +1142,12 @@ static int cas_txmac_interrupt(struct net_device *dev,
spin_lock(&cp->stat_lock[0]);
if (txmac_stat & MAC_TX_UNDERRUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
cp->net_stats[0].tx_fifo_errors++;
}
if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
cp->net_stats[0].tx_errors++;
}
@@ -1487,8 +1475,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
@@ -1500,8 +1487,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
@@ -1515,8 +1501,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX reset command will not execute, "
- "resetting whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
@@ -1545,9 +1530,7 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
@@ -1580,9 +1563,8 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debug
@@ -1605,9 +1587,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
switch (cp->lstate) {
case link_force_ret:
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", cp->dev->name);
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
cp->timer_ticks = 5;
cp->lstate = link_force_ok;
@@ -1675,9 +1655,9 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
cas_mif_poll(cp, 0);
cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
cas_phy_write(cp, MII_BMCR,
cp->link_fcntl | BMCR_ANENABLE |
BMCR_ANRESTART);
@@ -1704,9 +1684,8 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Link down\n",
- cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
restart = 1;
} else if (++cp->timer_ticks > 10)
@@ -1737,23 +1716,23 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
- readl(cp->regs + REG_BIM_DIAG));
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
/* cassini+ has this reserved */
if ((stat & PCI_ERR_BADACK) &&
((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (stat & PCI_ERR_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (stat & PCI_ERR_OTHER)
- printk("<other> ");
+ pr_cont(" <other>");
if (stat & PCI_ERR_BIM_DMA_WRITE)
- printk("<BIM DMA 0 write req> ");
+ pr_cont(" <BIM DMA 0 write req>");
if (stat & PCI_ERR_BIM_DMA_READ)
- printk("<BIM DMA 0 read req> ");
- printk("\n");
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
u16 cfg;
@@ -1762,25 +1741,19 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
* true cause.
*/
pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
if (cfg & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (cfg & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (cfg & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (cfg & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
cfg &= (PCI_STATUS_PARITY |
@@ -1806,9 +1779,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
{
if (status & INTR_RX_TAG_ERROR) {
/* corrupt RX tag framing */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
@@ -1817,9 +1789,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
if (status & INTR_RX_LEN_MISMATCH) {
/* length mismatch. */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
@@ -1861,12 +1832,11 @@ do_reset:
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
- printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
- dev->name, status);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
schedule_work(&cp->reset_task);
#endif
return 1;
@@ -1920,9 +1890,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
if (count < 0)
break;
- if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
@@ -1969,9 +1938,9 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
#ifdef USE_TX_COMPWB
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
- cp->dev->name, status, (unsigned long long)compwb);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
@@ -2050,10 +2019,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
@@ -2130,10 +2097,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
@@ -2265,9 +2230,8 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
entry = cp->rx_old[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
cluster = -1;
count = entry & 0x3;
@@ -2337,11 +2301,10 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
int entry, drops;
int npackets = 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
- cp->dev->name, ring,
- readl(cp->regs + REG_RX_COMP_HEAD),
- cp->rx_new[ring]);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
entry = cp->rx_new[ring];
drops = 0;
@@ -2442,8 +2405,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
cp->rx_new[ring] = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
return npackets;
}
@@ -2457,10 +2419,9 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
last = cp->rx_cur[ring];
entry = cp->rx_new[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
- dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
- entry);
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
/* zero and re-mark descriptors */
while (last != entry) {
@@ -2729,42 +2690,38 @@ static void cas_tx_timeout(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!cp->hw_running) {
- printk("%s: hrm.. hw not running!\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running!\n");
return;
}
- printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
-
- printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
-
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
- "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
- dev->name,
- readl(cp->regs + REG_TX_CFG),
- readl(cp->regs + REG_MAC_TX_STATUS),
- readl(cp->regs + REG_MAC_TX_CFG),
- readl(cp->regs + REG_TX_FIFO_PKT_CNT),
- readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
- readl(cp->regs + REG_TX_FIFO_READ_PTR),
- readl(cp->regs + REG_TX_SM_1),
- readl(cp->regs + REG_TX_SM_2));
-
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_RX_CFG),
- readl(cp->regs + REG_MAC_RX_STATUS),
- readl(cp->regs + REG_MAC_RX_CFG));
-
- printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_HP_STATE_MACHINE),
- readl(cp->regs + REG_HP_STATUS0),
- readl(cp->regs + REG_HP_STATUS1),
- readl(cp->regs + REG_HP_STATUS2));
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
#if 1
atomic_inc(&cp->reset_task_pending);
@@ -2830,8 +2787,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return 1;
}
@@ -2908,11 +2864,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
- if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
- "avail %d\n",
- dev->name, ring, entry, skb->len,
- TX_BUFFS_AVAIL(cp, ring));
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
@@ -2999,6 +2953,40 @@ static inline void cas_init_dma(struct cas *cp)
cas_init_rx_dma(cp);
}
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct dev_mc_list *dmi;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ }
+ else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
+ crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
@@ -3014,43 +3002,7 @@ static u32 cas_setup_multicast(struct cas *cp)
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
} else {
- u16 hash_table[16];
- u32 crc;
- struct dev_mc_list *dmi = cp->dev->mc_list;
- int i;
-
- /* use the alternate mac address registers for the
- * first 15 multicast addresses
- */
- for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
- if (!dmi) {
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
- continue;
- }
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
- cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
- cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
- cp->regs + REG_MAC_ADDRN(i*3 + 2));
- dmi = dmi->next;
- }
-
- /* use hw hash table for the next series of
- * multicast addresses
- */
- memset(hash_table, 0, sizeof(hash_table));
- while (dmi) {
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
- dmi = dmi->next;
- }
- for (i=0; i < 16; i++)
- writel(hash_table[i], cp->regs +
- REG_MAC_HASH_TABLEN(i));
+ cas_process_mc_list(cp);
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
@@ -3100,10 +3052,10 @@ static void cas_mac_reset(struct cas *cp)
if (readl(cp->regs + REG_MAC_TX_RESET) |
readl(cp->regs + REG_MAC_RX_RESET))
- printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
- cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
- readl(cp->regs + REG_MAC_RX_RESET),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
@@ -3423,7 +3375,7 @@ use_random_mac_addr:
goto done;
/* Sun MAC prefix then 3 random bytes. */
- printk(PFX "MAC address not found in ROM VPD\n");
+ pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
@@ -3484,7 +3436,7 @@ static int cas_check_invariants(struct cas *cp)
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
} else {
- printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
}
}
#endif
@@ -3529,7 +3481,7 @@ static int cas_check_invariants(struct cas *cp)
}
}
}
- printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+ pr_err("MII phy did not respond [%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
return -1;
@@ -3574,21 +3526,19 @@ static inline void cas_start_dma(struct cas *cp)
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
- printk(KERN_ERR
- "%s: enabling mac failed [tx:%08x:%08x].\n",
- cp->dev->name,
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto enable_rx_done;
}
udelay(10);
}
- printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
- cp->dev->name,
- (txfailed? "tx,rx":"rx"),
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
enable_rx_done:
cas_unmask_intr(cp); /* enable interrupts */
@@ -3690,9 +3640,8 @@ static void cas_set_link_modes(struct cas *cp)
}
}
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
- cp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
if (CAS_PHY_MII(cp->phy_type)) {
@@ -3762,18 +3711,14 @@ static void cas_set_link_modes(struct cas *cp)
if (netif_msg_link(cp)) {
if (pause & 0x01) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- cp->dev->name,
- cp->rx_fifo_size,
- cp->rx_pause_off,
- cp->rx_pause_on);
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
} else if (pause & 0x10) {
- printk(KERN_INFO "%s: TX pause enabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "TX pause enabled\n");
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Pause is disabled\n");
}
}
@@ -3849,7 +3794,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
goto done;
udelay(10);
}
- printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+ netdev_err(cp->dev, "sw reset failed\n");
done:
/* enable various BIM interrupts */
@@ -3955,7 +3900,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
#else
atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
- printk(KERN_ERR "reset called in cas_change_mtu\n");
+ pr_err("reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
#endif
@@ -4237,10 +4182,8 @@ static void cas_link_timer(unsigned long data)
if (((tlm == 0x5) || (tlm == 0x3)) &&
(CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "MAC_STATE[%08x]\n",
- cp->dev->name, val);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
reset = 1;
goto done;
}
@@ -4249,10 +4192,9 @@ static void cas_link_timer(unsigned long data)
wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
if ((val == 0) && (wptr != rptr)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "TX_FIFO[%08x:%08x:%08x]\n",
- cp->dev->name, val, wptr, rptr);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
reset = 1;
}
@@ -4268,7 +4210,7 @@ done:
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_link_timer\n");
+ pr_err("reset called in cas_link_timer\n");
schedule_work(&cp->reset_task);
#endif
}
@@ -4361,8 +4303,7 @@ static int cas_open(struct net_device *dev)
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
IRQF_SHARED, dev->name, (void *) dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n",
- cp->dev->name);
+ netdev_err(cp->dev, "failed to request irq !\n");
err = -EAGAIN;
goto err_spare;
}
@@ -5002,24 +4943,24 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ "base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
@@ -5027,7 +4968,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, dev->name);
if (err) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
@@ -5041,8 +4982,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
if (pci_try_set_mwi(pdev))
- printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
- pci_name(pdev));
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
cas_program_bridge(pdev);
@@ -5085,7 +5025,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, "
- "aborting.\n");
+ "aborting\n");
goto err_out_free_res;
}
pci_using_dac = 0;
@@ -5132,7 +5072,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
- if (link_mode >= 0 && link_mode <= 6)
+ if (link_mode >= 0 && link_mode < 6)
cp->link_cntl = link_modes[link_mode];
else
cp->link_cntl = BMCR_ANENABLE;
@@ -5144,7 +5084,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
if (!cp->regs) {
- dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
@@ -5163,7 +5103,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
goto err_out_iounmap;
}
@@ -5197,18 +5137,17 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
}
i = readl(cp->regs + REG_BIM_CFG);
- printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] %pM\n", dev->name,
- (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
- (i & BIM_CFG_32BIT) ? "32" : "64",
- (i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
- dev->dev_addr);
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
@@ -5322,7 +5261,7 @@ static int cas_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
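
For reference, the message-macro conversion applied throughout the cassini hunks above follows one pattern: conditional "if (netif_msg_X(cp)) printk(...)" blocks become netif_<level>()/netif_printk() calls, and unconditional printks that print cp->dev->name by hand become netdev_<level>() calls. A minimal sketch of that pattern in isolation (foo_priv and foo_report_link are made-up names; the only assumption is a u32 msg_enable field in the driver-private struct, which the netif_msg_*() tests consult):

#include <linux/netdevice.h>

struct foo_priv {
	u32 msg_enable;			/* consulted by netif_msg_*() */
};

static void foo_report_link(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* old idiom:
	 *	if (netif_msg_link(fp))
	 *		printk(KERN_INFO "%s: link is up\n", dev->name);
	 * new idiom -- the helper tests fp->msg_enable for NETIF_MSG_LINK
	 * and prefixes the message with the device name:
	 */
	netif_info(fp, link, dev, "link is up\n");

	/* unconditional messages use the plain netdev_<level> helpers */
	netdev_err(dev, "transmit timed out, resetting\n");
}
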
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe09..036b2dfb1d40 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -36,6 +36,8 @@
* *
****************************************************************************/
+#define pr_fmt(fmt) "cxgb: " fmt
+
#ifndef _CXGB_COMMON_H_
#define _CXGB_COMMON_H_
@@ -49,34 +51,13 @@
#include <linux/mdio.h>
#include <linux/crc32.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.2"
-#define PFX DRV_NAME ": "
-
-#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
-#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
-#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
-
-/*
- * More powerful macro that selectively prints messages based on msg_enable.
- * For info and debugging messages.
- */
-#define CH_MSG(adapter, level, category, fmt, ...) do { \
- if ((adapter)->msg_enable & NETIF_MSG_##category) \
- printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
- ## __VA_ARGS__); \
-} while (0)
-
-#ifdef DEBUG
-# define CH_DBG(adapter, category, fmt, ...) \
- CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
-#else
-# define CH_DBG(fmt, ...)
-#endif
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@@ -90,25 +71,13 @@
typedef struct adapter adapter_t;
struct t1_rx_mode {
- struct net_device *dev;
- u32 idx;
- struct dev_mc_list *list;
+ struct net_device *dev;
};
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
-#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
-
-static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
-{
- u8 *addr = NULL;
-
- if (rm->idx++ < rm->dev->mc_count) {
- addr = rm->list->dmi_addr;
- rm->list = rm->list->next;
- }
- return addr;
-}
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
+#define t1_get_netdev(rm) (rm->dev)
#define MAX_NPORTS 4
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
@@ -334,7 +303,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
return adapter->params.is_asic;
}
-extern struct pci_device_id t1_pci_tbl[];
+extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
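
The common.h hunk above drops the CH_ERR/CH_WARN/CH_ALERT wrappers and instead defines pr_fmt() before the includes, so the generic pr_<level>() helpers used in the rest of the chelsio files keep the old "cxgb: " prefix. A minimal standalone sketch of how that works (the driver name and function below are made up for illustration):

#define pr_fmt(fmt) "mydrv: " fmt	/* must precede the includes */

#include <linux/kernel.h>

static void report_map_failure(int err)
{
	/* becomes printk(KERN_ERR "mydrv: cannot map registers: %d\n", err) */
	pr_err("cannot map registers: %d\n", err);
}
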
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 082cdb28b510..0f71304e0542 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -125,8 +125,6 @@ static void t1_set_rxmode(struct net_device *dev)
struct t1_rx_mode rm;
rm.dev = dev;
- rm.idx = 0;
- rm.list = dev->mc_list;
mac->ops->set_rx_mode(mac, &rm);
}
@@ -976,7 +974,7 @@ void t1_fatal_err(struct adapter *adapter)
t1_sge_stop(adapter->sge);
t1_interrupts_disable(adapter);
}
- CH_ALERT("%s: encountered fatal error, operation suspended\n",
+ pr_alert("%s: encountered fatal error, operation suspended\n",
adapter->name);
}
@@ -1020,7 +1018,7 @@ static int __devinit init_one(struct pci_dev *pdev,
return err;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- CH_ERR("%s: cannot find PCI device memory base address\n",
+ pr_err("%s: cannot find PCI device memory base address\n",
pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
@@ -1030,20 +1028,20 @@ static int __devinit init_one(struct pci_dev *pdev,
pci_using_dac = 1;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- CH_ERR("%s: unable to obtain 64-bit DMA for "
+ pr_err("%s: unable to obtain 64-bit DMA for "
"consistent allocations\n", pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
}
} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
- CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
+ pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
+ pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1071,7 +1069,7 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->regs = ioremap(mmio_start, mmio_len);
if (!adapter->regs) {
- CH_ERR("%s: cannot map device registers\n",
+ pr_err("%s: cannot map device registers\n",
pci_name(pdev));
err = -ENOMEM;
goto out_free_dev;
@@ -1150,8 +1148,8 @@ static int __devinit init_one(struct pci_dev *pdev,
for (i = 0; i < bi->port_number; ++i) {
err = register_netdev(adapter->port[i].dev);
if (err)
- CH_WARN("%s: cannot register net device %s, skipping\n",
- pci_name(pdev), adapter->port[i].dev->name);
+ pr_warning("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
else {
/*
* Change the name we use for messages to the name of
@@ -1164,7 +1162,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
}
if (!adapter->registered_device_map) {
- CH_ERR("%s: could not register any net devices\n",
+ pr_err("%s: could not register any net devices\n",
pci_name(pdev));
goto out_release_adapter_res;
}
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 1e0749e000b0..639ff1955739 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -76,7 +76,7 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
} while (busy && --attempts);
if (busy)
- CH_ERR("%s: TRICN write timed out\n", adapter->name);
+ pr_err("%s: TRICN write timed out\n", adapter->name);
return busy;
}
@@ -86,7 +86,7 @@ static int tricn_init(adapter_t *adapter)
int i, sme = 1;
if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
- CH_ERR("%s: ESPI clock not ready\n", adapter->name);
+ pr_err("%s: ESPI clock not ready\n", adapter->name);
return -1;
}
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 2117c4fbb107..9e631b9d3948 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -44,6 +44,7 @@
#include "suni1x10gexp_regs.h"
#include <linux/crc32.h>
+#include <linux/slab.h>
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
@@ -251,8 +252,9 @@ static int pm3393_interrupt_handler(struct cmac *cmac)
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
- CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
- master_intr_status);
+ if (netif_msg_intr(cmac->adapter))
+ dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
+ master_intr_status);
/* TBD XXX Lets just clear everything for now */
pm3393_interrupt_clear(cmac);
@@ -375,12 +377,12 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- u8 *addr;
+ struct dev_mc_list *dmi;
int bit;
u16 mc_filter[4] = { 0, };
- while ((addr = t1_get_next_mcaddr(rm))) {
- bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
+ bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
@@ -776,11 +778,12 @@ static int pm3393_mac_reset(adapter_t * adapter)
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
- CH_DBG(adapter, HW,
- "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
- "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
- i, is_pl4_reset_finished, val, is_pl4_outof_lock,
- is_xaui_mabc_pll_locked);
+ if (netif_msg_hw(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+ "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+ i, is_pl4_reset_finished, val,
+ is_pl4_outof_lock, is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
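
Both the pm3393 hunk above and the cassini cas_process_mc_list() hunk switch from hand-rolled mc_list walking to the netdev_for_each_mc_addr() iterator. In this kernel generation the iterator still yields a struct dev_mc_list with the address in dmi_addr (later kernels changed it to struct netdev_hw_addr). A minimal sketch of the same hash-filter construction, with made-up names and the same CRC bit selection as the pm3393 code:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void build_mc_hash(struct net_device *dev, u16 mc_filter[4])
{
	struct dev_mc_list *dmi;
	int bit;

	memset(mc_filter, 0, 4 * sizeof(u16));
	netdev_for_each_mc_addr(dmi, dev) {
		/* bits 23..28 of the CRC pick one of 64 filter bits */
		bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f;
		mc_filter[bit >> 4] |= 1 << (bit & 0xf);
	}
}
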
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 109d2783e4d8..df3a1410696e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -53,6 +53,7 @@
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include "cpl5_cmd.h"
#include "sge.h"
@@ -248,7 +249,7 @@ static void restart_sched(unsigned long);
*
* Interrupts are handled by a single CPU and it is likely that on a MP system
* the application is migrated to another CPU. In that scenario, we try to
- * seperate the RX(in irq context) and TX state in order to decrease memory
+ * separate the RX(in irq context) and TX state in order to decrease memory
* contention.
*/
struct sge {
@@ -267,7 +268,7 @@ struct sge {
struct sk_buff *espibug_skb[MAX_NPORTS];
u32 sge_control; /* shadow value of sge control reg */
struct sge_intr_counts stats;
- struct sge_port_stats *port_stats[MAX_NPORTS];
+ struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
struct sched *tx_sched;
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
@@ -953,7 +954,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
sge->stats.respQ_empty++;
if (cause & F_RESPQ_OVERFLOW) {
sge->stats.respQ_overflow++;
- CH_ALERT("%s: SGE response queue overflow\n",
+ pr_alert("%s: SGE response queue overflow\n",
adapter->name);
}
if (cause & F_FL_EXHAUSTED) {
@@ -962,12 +963,12 @@ int t1_sge_intr_error_handler(struct sge *sge)
}
if (cause & F_PACKET_TOO_BIG) {
sge->stats.pkt_too_big++;
- CH_ALERT("%s: SGE max packet size exceeded\n",
+ pr_alert("%s: SGE max packet size exceeded\n",
adapter->name);
}
if (cause & F_PACKET_MISMATCH) {
sge->stats.pkt_mismatch++;
- CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+ pr_alert("%s: SGE packet mismatch\n", adapter->name);
}
if (cause & SGE_INT_FATAL)
t1_fatal_err(adapter);
@@ -1101,7 +1102,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
- CH_ERR("%s: unexpected offload packet, cmd %u\n",
+ pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
}
@@ -1687,7 +1688,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
netif_stop_queue(dev);
set_bit(dev->if_port, &sge->stopped_tx_queues);
sge->stats.cmdQ_full[2]++;
- CH_ERR("%s: Tx ring full while queue awake!\n",
+ pr_err("%s: Tx ring full while queue awake!\n",
adapter->name);
}
spin_unlock(&q->lock);
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bfe..53bde15fc94d 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -90,7 +90,7 @@ int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
- CH_ALERT("%s: TPI write to 0x%x failed\n",
+ pr_alert("%s: TPI write to 0x%x failed\n",
adapter->name, addr);
return tpi_busy;
}
@@ -118,7 +118,7 @@ int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
- CH_ALERT("%s: TPI read from 0x%x failed\n",
+ pr_alert("%s: TPI read from 0x%x failed\n",
adapter->name, addr);
else
*valp = readl(adapter->regs + A_TPI_RD_DATA);
@@ -262,7 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
udelay(10);
} while (busy && --attempts);
if (busy)
- CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
+ pr_alert("%s: MDIO operation timed out\n", adapter->name);
return busy;
}
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
};
-struct pci_device_id t1_pci_tbl[] = {
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
@@ -581,7 +581,7 @@ int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data)
} while (!(val & F_VPD_OP_FLAG) && --i);
if (!(val & F_VPD_OP_FLAG)) {
- CH_ERR("%s: reading EEPROM address 0x%x failed\n",
+ pr_err("%s: reading EEPROM address 0x%x failed\n",
adapter->name, addr);
return -EIO;
}
@@ -734,8 +734,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
break;
case CHBT_BOARD_8000:
case CHBT_BOARD_CHT110:
- CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
- cause);
+ if (netif_msg_intr(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "External interrupt cause 0x%x\n", cause);
if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
struct cmac *mac = adapter->port[0].mac;
@@ -746,8 +747,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
t1_tpi_read(adapter,
A_ELMER0_GPI_STAT, &mod_detect);
- CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
- mod_detect ? "removed" : "inserted");
+ if (netif_msg_link(adapter))
+ dev_info(&adapter->pdev->dev, "XPAK %s\n",
+ mod_detect ? "removed" : "inserted");
}
break;
#ifdef CONFIG_CHELSIO_T1_COUGAR
@@ -1084,7 +1086,7 @@ static void __devinit init_link_config(struct link_config *lc,
#ifdef CONFIG_CHELSIO_T1_COUGAR
if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
- CH_ERR("%s: CSPI initialization failed\n",
+ pr_err("%s: CSPI initialization failed\n",
adapter->name);
goto error;
}
@@ -1105,20 +1107,20 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
if (!adapter->sge) {
- CH_ERR("%s: SGE initialization failed\n",
+ pr_err("%s: SGE initialization failed\n",
adapter->name);
goto error;
}
if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
- CH_ERR("%s: ESPI initialization failed\n",
+ pr_err("%s: ESPI initialization failed\n",
adapter->name);
goto error;
}
adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
if (!adapter->tp) {
- CH_ERR("%s: TP initialization failed\n",
+ pr_err("%s: TP initialization failed\n",
adapter->name);
goto error;
}
@@ -1138,14 +1140,14 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
phy_addr, bi->mdio_ops);
if (!adapter->port[i].phy) {
- CH_ERR("%s: PHY %d initialization failed\n",
+ pr_err("%s: PHY %d initialization failed\n",
adapter->name, i);
goto error;
}
adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
if (!mac) {
- CH_ERR("%s: MAC %d initialization failed\n",
+ pr_err("%s: MAC %d initialization failed\n",
adapter->name, i);
goto error;
}
@@ -1157,7 +1159,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
mac->ops->macaddress_get(mac, hw_addr);
else if (vpd_macaddress_get(adapter, i, hw_addr)) {
- CH_ERR("%s: could not read MAC address from VPD ROM\n",
+ pr_err("%s: could not read MAC address from VPD ROM\n",
adapter->port[i].dev->name);
goto error;
}
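
The subr.c hunk above (and the cassini hunk near the top of this diff) also converts the open-coded "struct pci_device_id xxx[] __devinitdata" tables to DEFINE_PCI_DEVICE_TABLE(), which here expands to a const table placed in a devinit section -- hence the matching "extern const" change in common.h. A minimal usage sketch with placeholder IDs (later kernels dropped the macro again in favour of plain "static const struct pci_device_id"):

#include <linux/pci.h>
#include <linux/module.h>

static DEFINE_PCI_DEVICE_TABLE(mydrv_pci_tbl) = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device IDs */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, mydrv_pci_tbl);
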
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 99b51f61fe77..c844111cffeb 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -48,14 +48,14 @@ static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
i++;
} while (((status & 1) == 0) && (i < 50));
if (i == 50)
- CH_ERR("Invalid tpi read from MAC, breaking loop.\n");
+ pr_err("Invalid tpi read from MAC, breaking loop.\n");
t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
*val = (vhi << 16) | vlo;
- /* CH_ERR("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), *val); */
spin_unlock_bh(&adapter->mac_lock);
@@ -66,7 +66,7 @@ static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
spin_lock_bh(&adapter->mac_lock);
t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
- /* CH_ERR("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), data); */
spin_unlock_bh(&adapter->mac_lock);
@@ -225,7 +225,7 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
for (i = 0; i < len; i++) {
if (ib[i].addr == INITBLOCK_SLEEP) {
udelay( ib[i].data );
- CH_ERR("sleep %d us\n",ib[i].data);
+ pr_err("sleep %d us\n",ib[i].data);
} else
vsc_write( adapter, ib[i].addr, ib[i].data );
}
@@ -241,7 +241,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
- CH_ERR("No bist address: 0x%x\n", address);
+ pr_err("No bist address: 0x%x\n", address);
data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
((moduleid & 0xff) << 0));
@@ -251,9 +251,9 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
if ((result & (1 << 9)) != 0x0)
- CH_ERR("Still in bist read: 0x%x\n", result);
+ pr_err("Still in bist read: 0x%x\n", result);
else if ((result & (1 << 8)) != 0x0)
- CH_ERR("bist read error: 0x%x\n", result);
+ pr_err("bist read error: 0x%x\n", result);
return (result & 0xff);
}
@@ -268,10 +268,10 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
- CH_ERR("No bist address: 0x%x\n", address);
+ pr_err("No bist address: 0x%x\n", address);
if (value > 255)
- CH_ERR("Suspicious write out of range value: 0x%x\n", value);
+ pr_err("Suspicious write out of range value: 0x%x\n", value);
data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
((moduleid & 0xff) << 0));
@@ -281,9 +281,9 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
vsc_read(adapter, REG_RAM_BIST_CMD, &result);
if ((result & (1 << 27)) != 0x0)
- CH_ERR("Still in bist write: 0x%x\n", result);
+ pr_err("Still in bist write: 0x%x\n", result);
else if ((result & (1 << 26)) != 0x0)
- CH_ERR("bist write error: 0x%x\n", result);
+ pr_err("bist write error: 0x%x\n", result);
return 0;
}
@@ -306,7 +306,7 @@ static int check_bist(adapter_t *adapter, int moduleid)
column = ((bist_rd(adapter,moduleid, 0x0e)<<8) +
(bist_rd(adapter,moduleid, 0x0d)));
if ((result & 3) != 0x3)
- CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
+ pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
result, moduleid, column);
return 0;
}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4332b3a2fafb..4b451a7c03e9 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,6 +10,8 @@
* Modified and maintained by: Michael Chan <mchan@broadcom.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -47,7 +49,6 @@
#include "cnic_defs.h"
#define DRV_MODULE_NAME "cnic"
-#define PFX DRV_MODULE_NAME ": "
static char version[] __devinitdata =
"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
@@ -326,6 +327,12 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
+ rcu_read_lock();
+ if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+ rc = -ENODEV;
+ rcu_read_unlock();
+ break;
+ }
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (cnic_in_use(csk)) {
@@ -340,6 +347,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
cnic_cm_set_pg(csk);
}
csk_put(csk);
+ rcu_read_unlock();
rc = 0;
}
}
@@ -409,14 +417,13 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
struct cnic_dev *dev;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type]) {
- printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
- "been registered\n", ulp_type);
+ pr_err("%s: Type %d has already been registered\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
@@ -455,15 +462,14 @@ int cnic_unregister_driver(int ulp_type)
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[ulp_type];
if (!ulp_ops) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
- "been registered\n", ulp_type);
+ pr_err("%s: Type %d has not been registered\n",
+ __func__, ulp_type);
goto out_unlock;
}
read_lock(&cnic_dev_lock);
@@ -471,8 +477,8 @@ int cnic_unregister_driver(int ulp_type)
struct cnic_local *cp = dev->cnic_priv;
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
- "still has devices registered\n", ulp_type);
+ pr_err("%s: Type %d still has devices registered\n",
+ __func__, ulp_type);
read_unlock(&cnic_dev_lock);
goto out_unlock;
}
@@ -492,8 +498,7 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
- " to zero.\n", dev->netdev->name);
+ netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
return 0;
out_unlock:
@@ -511,20 +516,19 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
struct cnic_ulp_ops *ulp_ops;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type] == NULL) {
- printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
- "has not been registered\n", ulp_type);
+ pr_err("%s: Driver with type %d has not been registered\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
- "been registered to this device\n", ulp_type);
+ pr_err("%s: Type %d has already been registered to this device\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
@@ -552,8 +556,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
@@ -561,8 +564,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
- printk(KERN_ERR PFX "cnic_unregister_device: device not "
- "registered to this ulp type %d\n", ulp_type);
+ pr_err("%s: device not registered to this ulp type %d\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EINVAL;
}
@@ -576,8 +579,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
- printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
- " to complete.\n", dev->netdev->name);
+ netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
return 0;
}
@@ -898,7 +900,8 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
- uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+ uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
+ PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1101,10 +1104,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- cp->bnx2x_status_blk = cp->status_blk;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
- memset(cp->bnx2x_status_blk, 0, sizeof(struct host_status_block));
+ memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
cp->l2_rx_ring_size = 15;
@@ -1865,8 +1867,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
}
if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
- printk(KERN_ERR PFX "%s: conn_buf size too big\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "conn_buf size too big\n");
return -ENOMEM;
}
conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
@@ -2026,13 +2027,13 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
break;
default:
ret = 0;
- printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
- dev->netdev->name, opcode);
+ netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+ opcode);
break;
}
if (ret < 0)
- printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
- dev->netdev->name, opcode);
+ netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+ opcode);
i += work;
}
return 0;
@@ -2074,8 +2075,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
goto end;
else {
- printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
- dev->netdev->name, kcqe_op_flag);
+ netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
+ kcqe_op_flag);
goto end;
}
@@ -2204,7 +2205,7 @@ static void cnic_service_bnx2_msix(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- struct status_block_msix *status_blk = cp->bnx2_status_blk;
+ struct status_block_msix *status_blk = cp->status_blk.bnx2;
u32 status_idx = status_blk->status_idx;
u16 hw_prod, sw_prod;
int kcqe_cnt;
@@ -2250,7 +2251,7 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
if (cp->ack_int)
cp->ack_int(dev);
- prefetch(cp->status_blk);
+ prefetch(cp->status_blk.gen);
prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -2291,7 +2292,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
struct cnic_local *cp = dev->cnic_priv;
u16 hw_prod, sw_prod;
struct cstorm_status_block_c *sblk =
- &cp->bnx2x_status_blk->c_status_block;
+ &cp->status_blk.bnx2x->c_status_block;
u32 status_idx = sblk->status_block_index;
int kcqe_cnt;
@@ -2333,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
struct cnic_local *cp = dev->cnic_priv;
u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
- prefetch(cp->status_blk);
- prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+ if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ prefetch(cp->status_blk.bnx2x);
+ prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
tasklet_schedule(&cp->cnic_irq_task);
-
- cnic_chk_pkt_rings(cp);
+ cnic_chk_pkt_rings(cp);
+ }
return 0;
}
@@ -2513,7 +2514,7 @@ static int cnic_cm_offload_pg(struct cnic_sock *csk)
l4kwqe->sa5 = dev->mac_addr[5];
l4kwqe->etype = ETH_P_IP;
- l4kwqe->ipid_count = DEF_IPID_COUNT;
+ l4kwqe->ipid_start = DEF_IPID_START;
l4kwqe->host_opaque = csk->l5_cid;
if (csk->vlan_id) {
@@ -2859,8 +2860,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
- int is_v6, err, rc = -ENETUNREACH;
- struct dst_entry *dst;
+ int is_v6, rc = 0;
+ struct dst_entry *dst = NULL;
struct net_device *realdev;
u32 local_port;
@@ -2876,39 +2877,31 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
clear_bit(SK_F_IPV6, &csk->flags);
if (is_v6) {
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
set_bit(SK_F_IPV6, &csk->flags);
- err = cnic_get_v6_route(&saddr->remote.v6, &dst);
- if (err)
- return err;
-
- if (!dst || dst->error || !dst->dev)
- goto err_out;
+ cnic_get_v6_route(&saddr->remote.v6, &dst);
memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
sizeof(struct in6_addr));
csk->dst_port = saddr->remote.v6.sin6_port;
local_port = saddr->local.v6.sin6_port;
-#else
- return rc;
-#endif
} else {
- err = cnic_get_v4_route(&saddr->remote.v4, &dst);
- if (err)
- return err;
-
- if (!dst || dst->error || !dst->dev)
- goto err_out;
+ cnic_get_v4_route(&saddr->remote.v4, &dst);
csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
csk->dst_port = saddr->remote.v4.sin_port;
local_port = saddr->local.v4.sin_port;
}
- csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
- if (realdev != dev->netdev)
- goto err_out;
+ csk->vlan_id = 0;
+ csk->mtu = dev->netdev->mtu;
+ if (dst && dst->dev) {
+ u16 vlan = cnic_get_vlan(dst->dev, &realdev);
+ if (realdev == dev->netdev) {
+ csk->vlan_id = vlan;
+ csk->mtu = dst_mtu(dst);
+ }
+ }
if (local_port >= CNIC_LOCAL_PORT_MIN &&
local_port < CNIC_LOCAL_PORT_MAX) {
@@ -2926,9 +2919,6 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
}
csk->src_port = local_port;
- csk->mtu = dst_mtu(dst);
- rc = 0;
-
err_out:
dst_release(dst);
return rc;
@@ -3052,6 +3042,14 @@ static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
goto done;
}
+ /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
+ if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ goto done;
+ }
+
csk->pg_cid = kcqe->pg_cid;
set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
cnic_cm_conn_req(csk);
@@ -3089,6 +3087,13 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
}
switch (opcode) {
+ case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
+ if (l4kcqe->status != 0) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ }
+ break;
case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
if (l4kcqe->status == 0)
set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
@@ -3099,7 +3104,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
- if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+ } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
csk->state = opcode;
/* fall through */
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
@@ -3163,6 +3171,16 @@ static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
return 1;
}
+ /* 57710+ only workaround to handle unsolicited RESET_COMP
+ * which will be treated like a RESET RCVD notification
+ * which triggers the clean up procedure
+ */
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+ if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ return 1;
+ }
+ }
return 0;
}
@@ -3172,10 +3190,8 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
struct cnic_local *cp = dev->cnic_priv;
clear_bit(SK_F_CONNECT_START, &csk->flags);
- if (cnic_ready_to_close(csk, opcode)) {
- cnic_close_conn(csk);
- cnic_cm_upcall(cp, csk, opcode);
- }
+ cnic_close_conn(csk);
+ cnic_cm_upcall(cp, csk, opcode);
}
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
@@ -3393,8 +3409,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
- cp->bnx2_status_blk = cp->status_blk;
- cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+ cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
(unsigned long) dev);
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
@@ -3403,7 +3418,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
tasklet_disable(&cp->cnic_irq_task);
return err;
}
- while (cp->bnx2_status_blk->status_completion_producer_index &&
+ while (cp->status_blk.bnx2->status_completion_producer_index &&
i < 10) {
CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
1 << (11 + sblk_num));
@@ -3411,13 +3426,13 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
i++;
barrier();
}
- if (cp->bnx2_status_blk->status_completion_producer_index) {
+ if (cp->status_blk.bnx2->status_completion_producer_index) {
cnic_free_irq(dev);
goto failed;
}
} else {
- struct status_block *sblk = cp->status_blk;
+ struct status_block *sblk = cp->status_blk.gen;
u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
int i = 0;
@@ -3435,8 +3450,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
return 0;
failed:
- printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
return -EBUSY;
}
@@ -3475,7 +3489,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
int i;
struct tx_bd *txbd;
dma_addr_t buf_map;
- struct status_block *s_blk = cp->status_blk;
+ struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
tx_cid = 20;
@@ -3483,7 +3497,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
cnic_init_context(dev, tx_cid + 1);
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- struct status_block_msix *sblk = cp->status_blk;
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
cnic_init_context(dev, tx_cid);
@@ -3539,7 +3553,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
struct rx_bd *rxbd;
- struct status_block *s_blk = cp->status_blk;
+ struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
cnic_init_context(dev, 2);
@@ -3547,7 +3561,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
coal_reg = BNX2_HC_COMMAND;
coal_val = CNIC_RD(dev, coal_reg);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- struct status_block_msix *sblk = cp->status_blk;
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
coal_reg = BNX2_HC_COALESCE_NOW;
@@ -3646,7 +3660,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
- struct status_block *sblk = cp->status_blk;
+ struct status_block *sblk = cp->status_blk.gen;
u32 val;
int err;
@@ -3758,8 +3772,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
err = cnic_init_bnx2_irq(dev);
if (err) {
- printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "cnic_init_irq failed\n");
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
return err;
@@ -4122,8 +4135,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
offsetof(struct cstorm_status_block_c,
index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
if (eq_idx != 0) {
- printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
- dev->netdev->name, eq_idx);
+ netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
return -EBUSY;
}
ret = cnic_init_bnx2x_irq(dev);
@@ -4208,8 +4220,7 @@ static int cnic_register_netdev(struct cnic_dev *dev)
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err)
- printk(KERN_ERR PFX "%s: register_cnic failed\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "register_cnic failed\n");
return err;
}
@@ -4238,13 +4249,12 @@ static int cnic_start_hw(struct cnic_dev *dev)
cp->chip_id = ethdev->chip_id;
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
- cp->status_blk = ethdev->irq_arr[0].status_blk;
+ cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
if (err) {
- printk(KERN_ERR PFX "%s: allocate resource failure\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "allocate resource failure\n");
goto err1;
}
@@ -4326,10 +4336,9 @@ static void cnic_free_dev(struct cnic_dev *dev)
i++;
}
if (atomic_read(&dev->ref_count) != 0)
- printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
- " to zero.\n", dev->netdev->name);
+ netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
- printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+ netdev_info(dev->netdev, "Removed CNIC device\n");
dev_put(dev->netdev);
kfree(dev);
}
@@ -4345,8 +4354,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev = kzalloc(alloc_size , GFP_KERNEL);
if (cdev == NULL) {
- printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
- dev->name);
+ netdev_err(dev, "allocate dev struct failure\n");
return NULL;
}
@@ -4364,7 +4372,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
spin_lock_init(&cp->cnic_ulp_lock);
- printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+ netdev_info(dev, "Added CNIC device\n");
return cdev;
}
@@ -4605,7 +4613,7 @@ static int __init cnic_init(void)
{
int rc = 0;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
rc = register_netdevice_notifier(&cnic_netdev_notifier);
if (rc) {
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 241d09acc0d4..a0d853dff983 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -101,7 +101,7 @@ struct cnic_redirect_entry {
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
-#define DEF_IPID_COUNT 0xc001
+#define DEF_IPID_START 0x8000
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
@@ -224,9 +224,12 @@ struct cnic_local {
u16 kcq_prod_idx;
u32 kcq_io_addr;
- void *status_blk;
- struct status_block_msix *bnx2_status_blk;
- struct host_status_block *bnx2x_status_blk;
+ union {
+ void *gen;
+ struct status_block_msix *bnx2;
+ struct host_status_block *bnx2x;
+ } status_blk;
+
struct host_def_status_block *bnx2x_def_status_blk;
u32 status_blk_num;
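For reference, a minimal sketch (not part of the patch) of how the status_blk union above is meant to be used: callers select the view that matches the hardware and interrupt mode instead of casting the old void pointer, as the cnic.c hunks earlier in this patch do. The helper name is made up, and it assumes both block layouts expose a status_idx field, which is how the driver reads them.

	/* illustrative only */
	static u32 sketch_status_idx(struct cnic_local *cp, bool using_msix)
	{
		if (using_msix)
			return cp->status_blk.bnx2->status_idx;	/* MSI-X status block */
		return cp->status_blk.gen->status_idx;		/* generic status block */
	}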
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 9827b278dc7c..7ce694d41b6b 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 8aaf98bdd4f7..110c62072e6f 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.0"
-#define CNIC_MODULE_RELDATE "Oct 10, 2009"
+#define CNIC_MODULE_VERSION "2.1.1"
+#define CNIC_MODULE_RELDATE "Feb 22, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 8d0be26f94e3..60777fd90b33 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
@@ -36,6 +37,7 @@
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/clk.h>
#include <asm/gpio.h>
#include <asm/atomic.h>
@@ -54,9 +56,9 @@ module_param(dumb_switch, int, 0444);
MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
-#define CPMAC_VERSION "0.5.1"
-/* frame size + 802.1q tag */
-#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
+#define CPMAC_VERSION "0.5.2"
+/* frame size + 802.1q tag + FCS size */
+#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES 8
/* Ethernet registers */
@@ -294,9 +296,16 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
static int cpmac_mdio_reset(struct mii_bus *bus)
{
+ struct clk *cpmac_clk;
+
+ cpmac_clk = clk_get(&bus->dev, "cpmac");
+ if (IS_ERR(cpmac_clk)) {
+ printk(KERN_ERR "unable to get cpmac clock\n");
+ return -1;
+ }
ar7_device_reset(AR7_RESET_BIT_MDIO);
cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
- MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
+ MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
return 0;
}
@@ -320,7 +329,6 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct dev_mc_list *iter;
- int i;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -340,8 +348,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- for (i = 0, iter = dev->mc_list; i < dev->mc_count;
- i++, iter = iter->next) {
+ netdev_for_each_mc_addr(iter, dev) {
bit = 0;
tmp = iter->dmi_addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
@@ -1130,8 +1137,9 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
if (phy_id == PHY_MAX_ADDR) {
- dev_err(&pdev->dev, "no PHY present\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
}
dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
@@ -1284,8 +1292,8 @@ void __devexit cpmac_exit(void)
{
platform_driver_unregister(&cpmac_driver);
mdiobus_unregister(cpmac_mii);
- mdiobus_free(cpmac_mii);
iounmap(cpmac_mii->priv);
+ mdiobus_free(cpmac_mii);
}
module_init(cpmac_init);
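As background for the clk_get()/clk_get_rate() change in cpmac_mdio_reset() above, here is a minimal sketch (not from the patch) of the usual clk API pattern; the "cpmac" clock name is the one the hunk assumes, and the helper itself is hypothetical. In the general pattern the reference is dropped with clk_put() once the rate has been read.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static long example_cpmac_bus_freq(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "cpmac");	/* look the clock up by name */
		unsigned long rate;

		if (IS_ERR(clk))
			return PTR_ERR(clk);			/* e.g. -ENOENT */
		rate = clk_get_rate(clk);			/* frequency in Hz */
		clk_put(clk);					/* release the reference */
		return rate;
	}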
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index a24be34a3f7a..61a33914e96f 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -18,7 +18,6 @@
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -1564,7 +1563,7 @@ static void
set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int num_addr = dev->mc_count;
+ int num_addr = netdev_mc_count(dev);
unsigned long int lo_bits;
unsigned long int hi_bits;
@@ -1596,13 +1595,12 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi = dev->mc_list;
- int i;
+ struct dev_mc_list *dmi;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- for (i = 0; i < num_addr; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
@@ -1632,7 +1630,6 @@ set_multicast_list(struct net_device *dev)
} else {
lo_bits |= (1 << hash_ix);
}
- dmi = dmi->next;
}
/* Disable individual receive */
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
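The mc_list conversions in this patch (cpmac, eth_v10, cs89x0, xgmac) all switch to the same helpers; below is a minimal sketch of the idiom, with a hypothetical function name, assuming the dev_mc_list representation used throughout this patch.

	static int example_count_mc_addrs(struct net_device *dev)
	{
		struct dev_mc_list *dmi;
		int n = 0;

		if (netdev_mc_empty(dev))		/* replaces dev->mc_list == NULL checks */
			return 0;
		netdev_for_each_mc_addr(dmi, dev)	/* replaces open-coded list walks */
			n++;				/* dmi->dmi_addr holds each address */
		return n;				/* same value as netdev_mc_count(dev) */
	}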
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index af9321617ce4..4c38491b8efb 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -138,12 +138,12 @@
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -580,7 +580,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
}
#ifdef CONFIG_SH_HICOSH4
- /* truely reset the chip */
+ /* truly reset the chip */
writeword(ioaddr, ADD_PORT, 0x0114);
writeword(ioaddr, DATA_PORT, 0x0040);
#endif
@@ -1325,8 +1325,7 @@ net_open(struct net_device *dev)
write_irq(dev, lp->chip_type, dev->irq);
ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
if (ret) {
- if (net_debug)
- printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
+ printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
goto bad_out;
}
}
@@ -1786,7 +1785,7 @@ static void set_multicast_list(struct net_device *dev)
{
lp->rx_mode = RX_ALL_ACCEPT;
}
- else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
{
/* The multicast-accept list is initialized to accept-all, and we
rely on higher-level filtering for now. */
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task;
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
struct dentry *debugfs_root;
struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e0b2f4..35cd36729155 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
return 0;
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 6ff356d4c7ab..fe08a004b0dd 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -67,32 +67,6 @@
/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000
-struct t3_rx_mode {
- struct net_device *dev;
- struct dev_mc_list *mclist;
- unsigned int idx;
-};
-
-static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
- struct dev_mc_list *mclist)
-{
- p->dev = dev;
- p->mclist = mclist;
- p->idx = 0;
-}
-
-static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
-{
- u8 *addr = NULL;
-
- if (rm->mclist && rm->idx < rm->dev->mc_count) {
- addr = rm->mclist->dmi_addr;
- rm->mclist = rm->mclist->next;
- rm->idx++;
- }
- return addr;
-}
-
enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
@@ -746,7 +720,7 @@ void t3_mac_enable_exact_filters(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
-int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..e3f1b8566495 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,8 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -80,7 +82,7 @@ enum {
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static const struct pci_device_id cxgb3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
CH_DEVICE(0x20, 0), /* PE9000 */
CH_DEVICE(0x21, 1), /* T302E */
CH_DEVICE(0x22, 2), /* T310E */
@@ -140,7 +142,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
@@ -324,11 +326,9 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
static void cxgb_set_rxmode(struct net_device *dev)
{
- struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
- init_rx_mode(&rm, dev, dev->mc_list);
- t3_mac_set_rx_mode(&pi->mac, &rm);
+ t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
@@ -339,17 +339,15 @@ static void cxgb_set_rxmode(struct net_device *dev)
*/
static void link_start(struct net_device *dev)
{
- struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
struct cmac *mac = &pi->mac;
- init_rx_mode(&rm, dev, dev->mc_list);
t3_mac_reset(mac);
t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
t3_mac_set_mtu(mac, dev->mtu);
t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
- t3_mac_set_rx_mode(mac, &rm);
+ t3_mac_set_rx_mode(mac, dev);
t3_link_start(&pi->phy, mac, &pi->link_config);
t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
@@ -441,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
unsigned long n)
{
- int attempts = 5;
+ int attempts = 10;
while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
if (!--attempts)
@@ -590,6 +588,19 @@ static void setup_rss(struct adapter *adap)
V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
static void init_napi(struct adapter *adap)
{
int i;
@@ -1284,6 +1295,7 @@ static void cxgb_down(struct adapter *adapter)
free_irq_resources(adapter);
quiesce_rx(adapter);
+ t3_sge_stop(adapter);
flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}
@@ -2754,6 +2766,42 @@ static void t3_adap_check_task(struct work_struct *work)
spin_unlock_irq(&adapter->work_lock);
}
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+ * The delay is between 1000-2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
/*
* Processes external (PHY) interrupts in process context.
*/
@@ -3222,6 +3270,11 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
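A minimal sketch (not part of the patch) of how the three new work items above are expected to be driven: the SGE error interrupt cannot sleep, so it queues them on the driver's private cxgb3_wq workqueue and the db_*_task() handlers then run in process context, as the sge.c hunk further down does. The function name here is hypothetical.

	static irqreturn_t example_db_full_intr(int irq, void *cookie)
	{
		struct adapter *adapter = cookie;

		/* defer to process context; db_full_task() runs from cxgb3_wq */
		queue_work(cxgb3_wq, &adapter->db_full_task);
		return IRQ_HANDLED;
	}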
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d87..c6485b39eb0e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
+#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
@@ -1252,7 +1253,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
struct mtutab mtutab;
unsigned int l2t_capacity;
- t = kcalloc(1, sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN,
- OFFLOAD_PORT_UP
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
};
struct cxgb3_client {
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index ff1611f90e7a..2f3ee721c3e1 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -34,6 +34,7 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
+#include <linux/slab.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..07d7e7fab3f5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -36,12 +36,14 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
+#include "cxgb3_offload.h"
#define USE_GTS 0
@@ -196,13 +198,13 @@ static inline void refill_rspq(struct adapter *adapter,
/**
* need_skb_unmap - does the platform need unmapping of sk_buffs?
*
- * Returns true if the platfrom needs sk_buff unmapping. The compiler
+ * Returns true if the platform needs sk_buff unmapping. The compiler
* optimizes away unecessary code if this returns true.
*/
static inline int need_skb_unmap(void)
{
/*
- * This structure is used to tell if the platfrom needs buffer
+ * This structure is used to tell if the platform needs buffer
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
@@ -480,6 +482,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= q->credits / 4) {
q->pend_cred = 0;
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
}
@@ -2079,6 +2082,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag;
@@ -2116,11 +2120,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!nr_frags) {
offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = sd->pg_chunk.va + 2;
- }
- len -= offset;
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((pi->rx_offload & T3_RX_CSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
- prefetch(qs->lro_va);
+ len -= offset;
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2147,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
return;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
- struct net_device *dev = qs->netdev;
- struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
@@ -2282,11 +2289,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
- u32 len, flags = ntohl(r->flags);
- __be32 rss_hi = *(const __be32 *)r,
- rss_lo = r->rss_hdr.rss_hash_val;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+ rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2497,7 +2507,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
- } while (is_new_response(r, q) && is_pure_response(r));
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2531,6 +2544,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
+ rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
@@ -2829,8 +2843,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
}
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
- CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
- status & F_HIPIODRBDROPERR ? "high" : "lo");
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & SGE_FATALERR)
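A minimal sketch (not part of the patch) of the ordering rule the rmb() additions above enforce: the generation check in is_new_response() must be observed before any other field of the response descriptor is read, so the payload loads are issued only after a read barrier. Names mirror the sge.c code, but the helper itself is hypothetical.

	static inline int example_poll_response(const struct rsp_desc *r,
						const struct sge_rspq *q)
	{
		if (!is_new_response(r, q))	/* generation bit says nothing new */
			return 0;
		rmb();				/* order payload reads after the check */
		/* now it is safe to read r->flags, r->rss_hdr, and friends */
		return 1;
	}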
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe065570..95a8ba0759f1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1262,7 +1262,8 @@ void t3_link_changed(struct adapter *adapter, int port_id)
lc->fc = fc;
}
- t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
@@ -1432,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
- F_HIRCQPARITYERROR)
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 0109ee4f2f91..c142a2132e9f 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -297,29 +297,30 @@ static int hash_hw_addr(const u8 * addr)
return hash;
}
-int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
{
u32 val, hash_lo, hash_hi;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
- if (rm->dev->flags & IFF_PROMISC)
+ if (dev->flags & IFF_PROMISC)
val |= F_COPYALLFRAMES;
t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
- if (rm->dev->flags & IFF_ALLMULTI)
+ if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- u8 *addr;
+ struct dev_mc_list *dmi;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- while ((addr = t3_get_next_mcaddr(rm)))
+ netdev_for_each_mc_addr(dmi, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
- set_addr_filter(mac, exact_addr_idx++, addr);
+ set_addr_filter(mac, exact_addr_idx++,
+ dmi->dmi_addr);
else {
- int hash = hash_hw_addr(addr);
+ int hash = hash_hw_addr(dmi->dmi_addr);
if (hash < 32)
hash_lo |= (1 << hash);
@@ -353,6 +354,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* packet size register includes header, but not FCS.
*/
mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
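A worked example (sketch, hypothetical helper) of the frame-size math in t3_mac_set_mtu() above: the register wants the Ethernet header included but not the FCS, and anything above 1536 gets four extra bytes of room for an 802.1Q tag.

	static unsigned int example_xgm_max_pkt_size(unsigned int mtu)
	{
		mtu += 14;		/* Ethernet header */
		if (mtu > 1536)
			mtu += 4;	/* allow for a VLAN tag on larger frames */
		return mtu;		/* e.g. 1500 -> 1514, 9000 -> 9018 */
	}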
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..3d8ff4889b56
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_H__
+#define __CXGB4_H__
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+#include "cxgb4_uld.h"
+#include "t4_hw.h"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 1
+#define FW_VERSION_MICRO 0
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 16, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+};
+
+enum {
+ MEM_EDC0,
+ MEM_EDC1,
+ MEM_MC
+};
+
+enum dev_master {
+ MASTER_CANT,
+ MASTER_MAY,
+ MASTER_MUST
+};
+
+enum dev_state {
+ DEV_STATE_UNINIT,
+ DEV_STATE_INIT,
+ DEV_STATE_ERR
+};
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+ u64 octets;
+ u64 frames;
+ u64 bcast_frames;
+ u64 mcast_frames;
+ u64 ucast_frames;
+ u64 error_frames;
+
+ u64 frames_64;
+ u64 frames_65_127;
+ u64 frames_128_255;
+ u64 frames_256_511;
+ u64 frames_512_1023;
+ u64 frames_1024_1518;
+ u64 frames_1519_max;
+
+ u64 drop;
+
+ u64 ovflow0;
+ u64 ovflow1;
+ u64 ovflow2;
+ u64 ovflow3;
+ u64 trunc0;
+ u64 trunc1;
+ u64 trunc2;
+ u64 trunc3;
+};
+
+struct tp_tcp_stats {
+ u32 tcpOutRsts;
+ u64 tcpInSegs;
+ u64 tcpOutSegs;
+ u64 tcpRetransSegs;
+};
+
+struct tp_err_stats {
+ u32 macInErrs[4];
+ u32 hdrInErrs[4];
+ u32 tcpInErrs[4];
+ u32 tnlCongDrops[4];
+ u32 ofldChanDrops[4];
+ u32 tnlTxDrops[4];
+ u32 ofldVlanDrops[4];
+ u32 tcp6InErrs[4];
+ u32 ofldNoNeigh;
+ u32 ofldCongDefer;
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+};
+
+struct vpd_params {
+ unsigned int cclk;
+ u8 ec[EC_LEN + 1];
+ u8 sn[SERNUM_LEN + 1];
+ u8 id[ID_LEN + 1];
+};
+
+struct pci_params {
+ unsigned char speed;
+ unsigned char width;
+};
+
+struct adapter_params {
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+
+ unsigned int fw_vers;
+ unsigned int tp_vers;
+ u8 api_vers[7];
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned char nports; /* # of ethernet ports */
+ unsigned char portvec;
+ unsigned char rev; /* chip revision */
+ unsigned char offload;
+
+ unsigned int ofldq_wr_cred;
+};
+
+struct trace_params {
+ u32 data[TRACE_LEN / 4];
+ u32 mask[TRACE_LEN / 4];
+ unsigned short snap_len;
+ unsigned short min_len;
+ unsigned char skip_ofst;
+ unsigned char skip_len;
+ unsigned char invert;
+ unsigned char port;
+};
+
+struct link_config {
+ unsigned short supported; /* link capabilities */
+ unsigned short advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+enum {
+ MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
+ MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
+ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
+};
+
+enum {
+ MAX_EGRQ = 128, /* max # of egress queues, including FLs */
+ MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
+};
+
+struct adapter;
+struct vlan_group;
+struct sge_rspq;
+
+struct port_info {
+ struct adapter *adapter;
+ struct vlan_group *vlan_grp;
+ u16 viid;
+ s16 xact_addr_filt; /* index of exact MAC address filter */
+ u16 rss_size; /* size of VI's RSS table slice */
+ s8 mdio_addr;
+ u8 port_type;
+ u8 mod_type;
+ u8 port_id;
+ u8 tx_chan;
+ u8 lport; /* associated offload logical port */
+ u8 rx_offload; /* CSO, etc */
+ u8 nqsets; /* # of qsets */
+ u8 first_qset; /* index of first qset */
+ struct link_config link_cfg;
+};
+
+/* port_info.rx_offload flags */
+enum {
+ RX_CSO = 1 << 0,
+};
+
+struct dentry;
+struct work_struct;
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ QUEUES_BOUND = (1 << 3),
+ FW_OK = (1 << 4),
+};
+
+struct rx_sw_desc;
+
+struct sge_fl { /* SGE free-buffer queue state */
+ unsigned int avail; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+ unsigned long large_alloc_failed;
+ unsigned long starving;
+ /* RO fields */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ unsigned int size; /* capacity of free list */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+ __be64 *desc; /* address of HW Rx descriptor ring */
+ dma_addr_t addr; /* bus address of HW ring start */
+};
+
+/* A packet gather list */
+struct pkt_gl {
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+
+struct sge_rspq { /* state for an SGE response queue */
+ struct napi_struct napi;
+ const __be64 *cur_desc; /* current descriptor in queue */
+ unsigned int cidx; /* consumer index */
+ u8 gen; /* current generation bit */
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 uld; /* ULD handling this queue */
+ u8 idx; /* queue index within its group */
+ int offset; /* offset into current Rx buffer */
+ u16 cntxt_id; /* SGE context id for the response q */
+ u16 abs_id; /* absolute SGE id for the response q */
+ __be64 *desc; /* address of HW response ring */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response queue */
+ struct adapter *adap;
+ struct net_device *netdev; /* associated net device */
+ rspq_handler_t handler;
+};
+
+struct sge_eth_stats { /* Ethernet queue statistics */
+ unsigned long pkts; /* # of ethernet packets */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ unsigned long rx_cso; /* # of Rx checksum offloads */
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq { /* SW Ethernet Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_eth_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_stats { /* offload queue statistics */
+ unsigned long pkts; /* # of packets */
+ unsigned long imm; /* # of immediate-data packets */
+ unsigned long an; /* # of asynchronous notifications */
+ unsigned long nomem; /* # of responses deferred due to no mem */
+};
+
+struct sge_ofld_rxq { /* SW offload Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_ofld_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_sw_desc;
+
+struct sge_txq {
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long stops; /* # of times q has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+ unsigned int cntxt_id; /* SGE context id for the Tx q */
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ dma_addr_t phys_addr; /* physical address of the ring */
+};
+
+struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ struct sge_txq q;
+ struct netdev_queue *txq; /* associated netdev TX queue */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_txq { /* state for an SGE offload Tx queue */
+ struct sge_txq q;
+ struct adapter *adap;
+ struct sk_buff_head sendq; /* list of backpressured packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ u8 full; /* the Tx ring is full */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ctrl_txq { /* state for an SGE control Tx queue */
+ struct sge_txq q;
+ struct adapter *adap;
+ struct sk_buff_head sendq; /* list of backpressured packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ u8 full; /* the Tx ring is full */
+} ____cacheline_aligned_in_smp;
+
+struct sge {
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+ struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+ struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+ struct sge_rspq intrq ____cacheline_aligned_in_smp;
+ spinlock_t intrq_lock;
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u16 ethqsets; /* # of active Ethernet queue sets */
+ u16 ethtxq_rover; /* Tx queue to clean up next */
+ u16 ofldqsets; /* # of active offload queue sets */
+ u16 rdmaqs; /* # of available RDMA Rx queues */
+ u16 ofld_rxq[MAX_OFLD_QSETS];
+ u16 rdma_rxq[NCHAN];
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
+ unsigned int starve_thres;
+ u8 idma_state[2];
+ void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
+ struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
+ DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+ DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+ struct timer_list rx_timer; /* refills starving FLs */
+ struct timer_list tx_timer; /* checks Tx queues */
+};
+
+#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
+#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+
+struct l2t_data;
+
+struct adapter {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct device *pdev_dev;
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+
+ const char *name;
+ int msg_enable;
+
+ struct adapter_params params;
+ struct cxgb4_virt_res vres;
+ unsigned int swintr;
+
+ unsigned int wol;
+
+ struct {
+ unsigned short vec;
+ char desc[14];
+ } msix_info[MAX_INGQ + 1];
+
+ struct sge sge;
+
+ struct net_device *port[MAX_NPORTS];
+ u8 chan_map[NCHAN]; /* channel -> port map */
+
+ struct l2t_data *l2t;
+ void *uld_handle[CXGB4_ULD_MAX];
+ struct list_head list_node;
+
+ struct tid_info tids;
+ void **tid_release_head;
+ spinlock_t tid_release_lock;
+ struct work_struct tid_release_task;
+ bool tid_release_task_busy;
+
+ struct dentry *debugfs_root;
+
+ spinlock_t stats_lock;
+};
+
+static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
+{
+ return readl(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
+{
+ writel(val, adap->regs + reg_addr);
+}
+
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
+{
+ return readq(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
+{
+ writeq(val, adap->regs + reg_addr);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+ return netdev_priv(adap->port[idx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->adapter;
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void *t4_alloc_mem(size_t size);
+void t4_free_mem(void *addr);
+
+void t4_free_sge_resources(struct adapter *adap);
+irq_handler_t t4_intr_handler(struct adapter *adap);
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct net_device *dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+ unsigned int iqid);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid);
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+ struct net_device *dev, unsigned int iqid);
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
+void t4_sge_init(struct adapter *adap);
+void t4_sge_start(struct adapter *adap);
+void t4_sge_stop(struct adapter *adap);
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+void t4_intr_clear(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_check_fw_version(struct adapter *adapter);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+void t4_fatal_err(struct adapter *adapter);
+void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+ int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+ int filter_index, int *enabled);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
+
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+ const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+#endif /* __CXGB4_H__ */
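A minimal usage sketch (not part of the patch) of the inline helpers declared in this header: netdev2adap() walks from a net_device to its adapter, which t4_read_reg() then uses for MMIO access. The helper name and the reg parameter are illustrative.

	static u32 example_read_adapter_reg(const struct net_device *dev, u32 reg)
	{
		struct adapter *adap = netdev2adap(dev);	/* netdev -> port_info -> adapter */

		return t4_read_reg(adap, reg);			/* 32-bit MMIO read */
	}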
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..a7e30a23d322
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sockios.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <asm/uaccess.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "l2t.h"
+
+#define DRV_VERSION "1.0.0-ko"
+#define DRV_DESC "Chelsio T4 Network Driver"
+
+/*
+ * Max interrupt hold-off timer value in us. Queues fall back to this value
+ * under extreme memory pressure so it's largish to give the system time to
+ * recover.
+ */
+#define MAX_SGE_TIMERVAL 200U
+
+enum {
+ MEMWIN0_APERTURE = 65536,
+ MEMWIN0_BASE = 0x30000,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 2048,
+ MEMWIN2_BASE = 0x1b800,
+};
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 32,
+ MIN_CTRL_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16
+};
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
+ CH_DEVICE(0xa000), /* PE10K */
+ { 0, }
+};
+
+#define FW_FNAME "cxgb4/t4fw.bin"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+MODULE_FIRMWARE(FW_FNAME);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
+
+/*
+ * Queue interrupt hold-off timer values. Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+ "0..4 in microseconds");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+ "thresholds 1..3 for queue interrupt packet counters");
+
+static int vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+
+static unsigned int num_vf[4];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+ if (!netif_carrier_ok(dev))
+ netdev_info(dev, "link down\n");
+ else {
+ static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+ const char *s = "10Mbps";
+ const struct port_info *p = netdev_priv(dev);
+
+ switch (p->link_cfg.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+ }
+
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+ fc[p->link_cfg.fc]);
+ }
+}
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+ struct net_device *dev = adapter->port[port_id];
+
+ /* Skip changes from disabled ports. */
+ if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+ if (link_stat)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ link_report(dev);
+ }
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA"
+ };
+
+ const struct net_device *dev = adap->port[port_id];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ netdev_info(dev, "port module unplugged\n");
+ else
+ netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct dev_addr_list *d;
+ const struct netdev_hw_addr *ha;
+ int uc_cnt = netdev_uc_count(dev);
+ const struct port_info *pi = netdev_priv(dev);
+
+ /* first do the secondary unicast addresses */
+ netdev_for_each_uc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ /* next set up the multicast addresses */
+ netdev_for_each_mc_addr(d, dev) {
+ addr[naddr++] = d->dmi_addr;
+ if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+ ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
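Editor's note: set_addr_filters() above flushes addresses to the hardware in batches of up to seven (the size of its on-stack addr[] array), asking the firmware to free the previously programmed exact-match filters only on the first batch. A standalone sketch of that batching pattern follows; program_batch() is a stand-in for t4_alloc_mac_filt(), and the ten dummy MACs are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define NADDR_MAX 7	/* matches ARRAY_SIZE(addr) in set_addr_filters() */

/* stand-in for t4_alloc_mac_filt(): just report what would be programmed */
static int program_batch(bool free_old, int naddr, const unsigned char **addr)
{
	printf("programming %d exact-match filters (free_old=%d)\n",
	       naddr, free_old);
	return 0;
}

int main(void)
{
	unsigned char macs[10][6] = { { 0 } };	/* ten dummy MAC addresses */
	const unsigned char *addr[NADDR_MAX];
	bool free_old = true;			/* only the first batch frees */
	int i, naddr = 0;

	for (i = 0; i < 10; i++) {
		addr[naddr++] = macs[i];
		if (naddr == NADDR_MAX || i == 9) {	/* batch full or last */
			program_batch(free_old, naddr, addr);
			free_old = false;
			naddr = 0;
		}
	}
	return 0;
}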
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
+ (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+ sleep_ok);
+ return ret;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
+ true);
+ if (ret == 0) {
+ ret = t4_change_mac(pi->adapter, 0, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true,
+ false);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0)
+ ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
+ if (ret == 0)
+ ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ const struct cpl_sge_egr_update *p = (void *)rsp;
+ unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+ struct sge_txq *txq = q->adap->sge.egr_map[qid];
+
+ txq->restarts++;
+ if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+ struct sge_eth_txq *eq;
+
+ eq = container_of(txq, struct sge_eth_txq, q);
+ netif_tx_wake_queue(eq->txq);
+ } else {
+ struct sge_ofld_txq *oq;
+
+ oq = container_of(txq, struct sge_ofld_txq, q);
+ tasklet_schedule(&oq->qresume_tsk);
+ }
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *p = (void *)rsp;
+
+ if (p->type == 0)
+ t4_handle_fw_rpl(q->adap, p->data);
+ } else if (opcode == CPL_L2T_WRITE_RPL) {
+ const struct cpl_l2t_write_rpl *p = (void *)rsp;
+
+ do_l2t_write_rpl(q->adap, p);
+ } else
+ dev_err(q->adap->pdev_dev,
+ "unexpected CPL %#x on FW event queue\n", opcode);
+ return 0;
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD; we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+
+ if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+ rxq->stats.nomem++;
+ return -1;
+ }
+ if (gl == NULL)
+ rxq->stats.imm++;
+ else if (gl == CXGB4_MSG_AN)
+ rxq->stats.an++;
+ else
+ rxq->stats.pkts++;
+ return 0;
+}
+
+static void disable_msi(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+}
+
+/*
+ * Interrupt handler for non-data events used with MSI-X.
+ */
+static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+ if (v & PFSW) {
+ adap->swintr = 1;
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+ }
+ t4_slow_intr_handler(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+ int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+
+ /* non-data interrupts */
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+ adap->msix_info[0].desc[n] = 0;
+
+ /* FW events */
+ snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
+ adap->msix_info[1].desc[n] = 0;
+
+ /* Ethernet queues */
+ for_each_port(adap, j) {
+ struct net_device *d = adap->port[j];
+ const struct port_info *pi = netdev_priv(d);
+
+ for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+ snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+ d->name, i);
+ adap->msix_info[msi_idx].desc[n] = 0;
+ }
+ }
+
+ /* offload queues */
+ for_each_ofldrxq(&adap->sge, i) {
+ snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
+ adap->name, i);
+ adap->msix_info[msi_idx++].desc[n] = 0;
+ }
+ for_each_rdmarxq(&adap->sge, i) {
+ snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
+ adap->name, i);
+ adap->msix_info[msi_idx++].desc[n] = 0;
+ }
+}
+
+static int request_msix_queue_irqs(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+
+ err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[1].desc, &s->fw_evtq);
+ if (err)
+ return err;
+
+ for_each_ethrxq(s, ethqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ethrxq[ethqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_ofldrxq(s, ofldqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ofldrxq[ofldqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_rdmarxq(s, rdmaqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->rdmarxq[rdmaqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ return 0;
+
+unwind:
+ while (--rdmaqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->rdmarxq[rdmaqidx].rspq);
+ while (--ofldqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->ofldrxq[ofldqidx].rspq);
+ while (--ethqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ return err;
+}
+
+static void free_msix_queue_irqs(struct adapter *adap)
+{
+ int i, msi = 2;
+ struct sge *s = &adap->sge;
+
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ for_each_ethrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+ for_each_ofldrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+ for_each_rdmarxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adap: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for all ports since the mapping
+ * table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adap)
+{
+ int i, j, err;
+ u16 rss[MAX_ETH_QSETS];
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+ const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (j = 0; j < pi->nqsets; j++)
+ rss[j] = q[j].rspq.abs_id;
+
+ err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
+ rss, pi->nqsets);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (q && q->handler)
+ napi_disable(&q->napi);
+ }
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (!q)
+ continue;
+ if (q->handler)
+ napi_enable(&q->napi);
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+ SEINTARM(q->intr_params) |
+ INGRESSQID(q->cntxt_id));
+ }
+}
+
+/**
+ * setup_sge_queues - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+ int err, msi_idx, i, j;
+ struct sge *s = &adap->sge;
+
+ bitmap_zero(s->starving_fl, MAX_EGRQ);
+ bitmap_zero(s->txq_maperr, MAX_EGRQ);
+
+ if (adap->flags & USING_MSIX)
+ msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+ NULL, NULL);
+ if (err)
+ return err;
+ msi_idx = -((int)s->intrq.abs_id + 1);
+ }
+
+ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ msi_idx, NULL, fwevtq_handler);
+ if (err) {
+freeout: t4_free_sge_resources(adap);
+ return err;
+ }
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+
+ for (j = 0; j < pi->nqsets; j++, q++) {
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+ msi_idx, &q->fl,
+ t4_ethrx_handler);
+ if (err)
+ goto freeout;
+ q->rspq.idx = j;
+ memset(&q->stats, 0, sizeof(q->stats));
+ }
+ for (j = 0; j < pi->nqsets; j++, t++) {
+ err = t4_sge_alloc_eth_txq(adap, t, dev,
+ netdev_get_tx_queue(dev, j),
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+ }
+
+ j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+ for_each_ofldrxq(s, i) {
+ struct sge_ofld_rxq *q = &s->ofldrxq[i];
+ struct net_device *dev = adap->port[i / j];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
+ &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->ofld_rxq[i] = q->rspq.abs_id;
+ err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ for_each_rdmarxq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmarxq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_rxq[i] = q->rspq.abs_id;
+ }
+
+ for_each_port(adap, i) {
+ /*
+ * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ * have RDMA queues, and that's the right value.
+ */
+ err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+ s->fw_evtq.cntxt_id,
+ s->rdmarxq[i].rspq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
+ QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+ return 0;
+}
+
+/*
+ * Returns 0 if new FW was successfully loaded, a positive errno if a load was
+ * started but failed, and a negative errno if flash load couldn't start.
+ */
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ u32 vers;
+ const struct fw_hdr *hdr;
+ const struct firmware *fw;
+ struct device *dev = adap->pdev_dev;
+
+ ret = request_firmware(&fw, FW_FNAME, dev);
+ if (ret < 0) {
+ dev_err(dev, "unable to load firmware image " FW_FNAME
+ ", error %d\n", ret);
+ return ret;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ vers = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ ret = -EINVAL; /* wrong major version, won't do */
+ goto out;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ vers > adap->params.fw_vers) {
+ ret = -t4_load_fw(adap, fw->data, fw->size);
+ if (!ret)
+ dev_info(dev, "firmware upgraded to version %pI4 from "
+ FW_FNAME "\n", &hdr->fw_ver);
+ }
+out: release_firmware(fw);
+ return ret;
+}
+
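Editor's note: upgrade_fw() reflashes when the firmware on flash has the wrong major version for this driver or the bundled image is strictly newer. The sketch below decodes and compares two version words on the assumption that major/minor/micro/build occupy one byte each, most significant first (the FW_HDR_FW_VER_*_GET macros themselves are defined in t4fw_api.h, outside this file); the version numbers and EXPECTED_MAJOR are made up.

#include <stdio.h>

#define EXPECTED_MAJOR 1		/* stand-in for FW_VERSION_MAJOR */

#define VER_MAJOR(v) (((v) >> 24) & 0xff)
#define VER_MINOR(v) (((v) >> 16) & 0xff)
#define VER_MICRO(v) (((v) >>  8) & 0xff)
#define VER_BUILD(v) ((v) & 0xff)

int main(void)
{
	unsigned int flash = 0x01000200;	/* hypothetical 1.0.2.0 on flash */
	unsigned int file  = 0x01000300;	/* hypothetical 1.0.3.0 in the .bin */

	printf("flash %u.%u.%u.%u, file %u.%u.%u.%u\n",
	       VER_MAJOR(flash), VER_MINOR(flash), VER_MICRO(flash),
	       VER_BUILD(flash), VER_MAJOR(file), VER_MINOR(file),
	       VER_MICRO(file), VER_BUILD(file));

	/* reflash if the flash major is unusable or the file is newer;
	 * a whole-word comparison works because the fields are ordered
	 * most significant first, as in upgrade_fw() above
	 */
	if (VER_MAJOR(flash) != EXPECTED_MAJOR || file > flash)
		printf("would flash the new image\n");
	return 0;
}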
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ void *p = kmalloc(size, GFP_KERNEL);
+
+ if (!p)
+ p = vmalloc(size);
+ if (p)
+ memset(p, 0, size);
+ return p;
+}
+
+/*
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+/*
+ * Implementation of ethtool operations.
+ */
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ netdev2adap(dev)->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxBroadcastFrames ",
+ "TxMulticastFrames ",
+ "TxUnicastFrames ",
+ "TxErrorFrames ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "TxFramesDropped ",
+ "TxPauseFrames ",
+ "TxPPP0Frames ",
+ "TxPPP1Frames ",
+ "TxPPP2Frames ",
+ "TxPPP3Frames ",
+ "TxPPP4Frames ",
+ "TxPPP5Frames ",
+ "TxPPP6Frames ",
+ "TxPPP7Frames ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxBroadcastFrames ",
+ "RxMulticastFrames ",
+ "RxUnicastFrames ",
+
+ "RxFramesTooLong ",
+ "RxJabberErrors ",
+ "RxFCSErrors ",
+ "RxLengthErrors ",
+ "RxSymbolErrors ",
+ "RxRuntFrames ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "RxPauseFrames ",
+ "RxPPP0Frames ",
+ "RxPPP1Frames ",
+ "RxPPP2Frames ",
+ "RxPPP3Frames ",
+ "RxPPP4Frames ",
+ "RxPPP5Frames ",
+ "RxPPP6Frames ",
+ "RxPPP7Frames ",
+
+ "RxBG0FramesDropped ",
+ "RxBG1FramesDropped ",
+ "RxBG2FramesDropped ",
+ "RxBG3FramesDropped ",
+ "RxBG0FramesTrunc ",
+ "RxBG1FramesTrunc ",
+ "RxBG2FramesTrunc ",
+ "RxBG3FramesTrunc ",
+
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define T4_REGMAP_SIZE (160 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T4_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(adapter->pdev));
+
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/*
+ * port stats maintained per queue of the port. They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+ const struct port_info *p, struct queue_port_stats *s)
+{
+ int i;
+ const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+ const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+ s->tso += tx->tso;
+ s->tx_csum += tx->tx_cso;
+ s->rx_csum += rx->stats.rx_cso;
+ s->vlan_ex += rx->stats.vlan_ex;
+ s->vlan_ins += tx->vlan_ins;
+ }
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+ data += sizeof(struct port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *ap)
+{
+ return 4 | (ap->params.rev << 10);
+}
+
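Editor's note: a quick standalone check of the packing described in the comment above; the chip version of 4 matches mk_adap_vers(), while the revision value here is hypothetical.

#include <stdio.h>

static unsigned int mk_vers(unsigned int chip, unsigned int rev)
{
	/* bits 0..9: chip version, bits 10..15: chip revision */
	return (chip & 0x3ff) | ((rev & 0x3f) << 10);
}

int main(void)
{
	unsigned int v = mk_vers(4, 1);	/* chip 4 as above, hypothetical rev 1 */

	printf("packed 0x%x -> chip %u, rev %u\n",
	       v, v & 0x3ff, (v >> 10) & 0x3f);
	return 0;
}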
+static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
+ unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for ( ; start <= end; start += sizeof(u32))
+ *p++ = t4_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ static const unsigned int reg_ranges[] = {
+ 0x1008, 0x1108,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1300, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x5924,
+ 0x5960, 0x59d4,
+ 0x5a00, 0x5af8,
+ 0x6000, 0x6098,
+ 0x6100, 0x6150,
+ 0x6200, 0x6208,
+ 0x6240, 0x6248,
+ 0x6280, 0x6338,
+ 0x6370, 0x638c,
+ 0x6400, 0x643c,
+ 0x6500, 0x6524,
+ 0x6a00, 0x6a38,
+ 0x6a60, 0x6a78,
+ 0x6b00, 0x6b84,
+ 0x6bf0, 0x6c84,
+ 0x6cf0, 0x6d84,
+ 0x6df0, 0x6e84,
+ 0x6ef0, 0x6f84,
+ 0x6ff0, 0x7084,
+ 0x70f0, 0x7184,
+ 0x71f0, 0x7284,
+ 0x72f0, 0x7384,
+ 0x73f0, 0x7450,
+ 0x7500, 0x7530,
+ 0x7600, 0x761c,
+ 0x7680, 0x76cc,
+ 0x7700, 0x7798,
+ 0x77c0, 0x77fc,
+ 0x7900, 0x79fc,
+ 0x7b00, 0x7c38,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8e1c,
+ 0x8e30, 0x8e78,
+ 0x8ea0, 0x8f6c,
+ 0x8fc0, 0x9074,
+ 0x90fc, 0x90fc,
+ 0x9400, 0x9458,
+ 0x9600, 0x96bc,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xea7c,
+ 0xf000, 0x11190,
+ 0x19040, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x1924c,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194f8,
+ 0x19800, 0x19f30,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e040, 0x1e04c,
+ 0x1e240, 0x1e28c,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e440, 0x1e44c,
+ 0x1e640, 0x1e68c,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e840, 0x1e84c,
+ 0x1ea40, 0x1ea8c,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee40, 0x1ee8c,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f040, 0x1f04c,
+ 0x1f240, 0x1f28c,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f440, 0x1f44c,
+ 0x1f640, 0x1f68c,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f840, 0x1f84c,
+ 0x1fa40, 0x1fa8c,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe40, 0x1fe8c,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x20000, 0x2002c,
+ 0x20100, 0x2013c,
+ 0x20190, 0x201c8,
+ 0x20200, 0x20318,
+ 0x20400, 0x20528,
+ 0x20540, 0x20614,
+ 0x21000, 0x21040,
+ 0x2104c, 0x21060,
+ 0x210c0, 0x210ec,
+ 0x21200, 0x21268,
+ 0x21270, 0x21284,
+ 0x212fc, 0x21388,
+ 0x21400, 0x21404,
+ 0x21500, 0x21518,
+ 0x2152c, 0x2153c,
+ 0x21550, 0x21554,
+ 0x21600, 0x21600,
+ 0x21608, 0x21628,
+ 0x21630, 0x2163c,
+ 0x21700, 0x2171c,
+ 0x21780, 0x2178c,
+ 0x21800, 0x21c38,
+ 0x21c80, 0x21d7c,
+ 0x21e00, 0x21e04,
+ 0x22000, 0x2202c,
+ 0x22100, 0x2213c,
+ 0x22190, 0x221c8,
+ 0x22200, 0x22318,
+ 0x22400, 0x22528,
+ 0x22540, 0x22614,
+ 0x23000, 0x23040,
+ 0x2304c, 0x23060,
+ 0x230c0, 0x230ec,
+ 0x23200, 0x23268,
+ 0x23270, 0x23284,
+ 0x232fc, 0x23388,
+ 0x23400, 0x23404,
+ 0x23500, 0x23518,
+ 0x2352c, 0x2353c,
+ 0x23550, 0x23554,
+ 0x23600, 0x23600,
+ 0x23608, 0x23628,
+ 0x23630, 0x2363c,
+ 0x23700, 0x2371c,
+ 0x23780, 0x2378c,
+ 0x23800, 0x23c38,
+ 0x23c80, 0x23d7c,
+ 0x23e00, 0x23e04,
+ 0x24000, 0x2402c,
+ 0x24100, 0x2413c,
+ 0x24190, 0x241c8,
+ 0x24200, 0x24318,
+ 0x24400, 0x24528,
+ 0x24540, 0x24614,
+ 0x25000, 0x25040,
+ 0x2504c, 0x25060,
+ 0x250c0, 0x250ec,
+ 0x25200, 0x25268,
+ 0x25270, 0x25284,
+ 0x252fc, 0x25388,
+ 0x25400, 0x25404,
+ 0x25500, 0x25518,
+ 0x2552c, 0x2553c,
+ 0x25550, 0x25554,
+ 0x25600, 0x25600,
+ 0x25608, 0x25628,
+ 0x25630, 0x2563c,
+ 0x25700, 0x2571c,
+ 0x25780, 0x2578c,
+ 0x25800, 0x25c38,
+ 0x25c80, 0x25d7c,
+ 0x25e00, 0x25e04,
+ 0x26000, 0x2602c,
+ 0x26100, 0x2613c,
+ 0x26190, 0x261c8,
+ 0x26200, 0x26318,
+ 0x26400, 0x26528,
+ 0x26540, 0x26614,
+ 0x27000, 0x27040,
+ 0x2704c, 0x27060,
+ 0x270c0, 0x270ec,
+ 0x27200, 0x27268,
+ 0x27270, 0x27284,
+ 0x272fc, 0x27388,
+ 0x27400, 0x27404,
+ 0x27500, 0x27518,
+ 0x2752c, 0x2753c,
+ 0x27550, 0x27554,
+ 0x27600, 0x27600,
+ 0x27608, 0x27628,
+ 0x27630, 0x2763c,
+ 0x27700, 0x2771c,
+ 0x27780, 0x2778c,
+ 0x27800, 0x27c38,
+ 0x27c80, 0x27d7c,
+ 0x27e00, 0x27e04
+ };
+
+ int i;
+ struct adapter *ap = netdev2adap(dev);
+
+ regs->version = mk_adap_vers(ap);
+
+ memset(buf, 0, T4_REGMAP_SIZE);
+ for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ t4_restart_aneg(p->adapter, 0, p->tx_chan);
+ return 0;
+}
+
+static int identify_port(struct net_device *dev, u32 data)
+{
+ if (data == 0)
+ data = 2; /* default to 2 seconds */
+
+ return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
+ data * 5);
+}
+
+static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_FIBER)
+ v |= SUPPORTED_FIBRE;
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (caps & ADVERTISED_100baseT_Full)
+ v |= FW_PORT_CAP_SPEED_100M;
+ if (caps & ADVERTISED_1000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_1G;
+ if (caps & ADVERTISED_10000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_10G;
+ return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_TWINAX)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+ cmd->advertising = from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
+ cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = p->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+ if (speed == SPEED_100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == SPEED_1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == SPEED_10000)
+ return FW_PORT_CAP_SPEED_10G;
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ unsigned int cap;
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+
+ if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ return -EINVAL;
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ /*
+ * PHY offers a single speed. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_caps(cmd->speed)))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ cap = speed_to_caps(cmd->speed);
+
+ if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
+ cmd->speed == SPEED_10000)
+ return -EINVAL;
+ lc->requested_speed = cap;
+ lc->advertising = 0;
+ } else {
+ cap = to_fw_linkcaps(cmd->advertising);
+ if (!(lc->supported & cap))
+ return -EINVAL;
+ lc->requested_speed = 0;
+ lc->advertising = cap | FW_PORT_CAP_ANEG;
+ }
+ lc->autoneg = cmd->autoneg;
+
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & FW_PORT_CAP_ANEG)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+ return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ return p->rx_offload & RX_CSO;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (data)
+ p->rx_offload |= RX_CSO;
+ else
+ p->rx_offload &= ~RX_CSO;
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ e->rx_jumbo_max_pending = 0;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+ e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ e->rx_jumbo_pending = 0;
+ e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ int i;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+
+ if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (i = 0; i < pi->nqsets; ++i) {
+ s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+ s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+ s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+ }
+ return 0;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us. 0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q)
+{
+ unsigned int idx = q->intr_params >> 1;
+
+ return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/**
+ * set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ * @adap: the adapter
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
+{
+ if ((us | cnt) == 0)
+ cnt = 1;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+ q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ return 0;
+}
+
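Editor's note: set_rxq_intr_params() snaps the requested hold-off time to the nearest entry of the adapter's timer table (the intr_holdoff module-parameter defaults above) and packs the result so that qtimer_val() can recover the index with a right shift. The sketch below assumes that layout, i.e. timer index in bits 1 and up with the packet-count enable in bit 0; the actual QINTR_TIMER_IDX/QINTR_CNT_EN encodings live in the SGE headers, not in this file.

#include <limits.h>
#include <stdio.h>

/* default hold-off timers, taken from the intr_holdoff parameter above */
static const unsigned int timer_val[] = { 5, 10, 20, 50, 100 };
#define NTIMERS (sizeof(timer_val) / sizeof(timer_val[0]))

static unsigned int closest(unsigned int us)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < NTIMERS; i++) {
		delta = (int)us - (int)timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

int main(void)
{
	unsigned int us = 30, cnt = 8;		/* hypothetical request */
	unsigned int idx = closest(us);
	/* assumed layout: timer index in bits 1+, count-enable in bit 0,
	 * matching the ">> 1" in qtimer_val() above
	 */
	unsigned int intr_params = (idx << 1) | (cnt > 0);

	printf("requested %u us -> timer %u us (idx %u), intr_params 0x%x\n",
	       us, timer_val[idx], idx, intr_params);
	return 0;
}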
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
+ c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+ const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+ c->rx_coalesce_usecs = qtimer_val(adap, rq);
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ adap->sge.counter_val[rq->pktcnt_idx] : 0;
+ return 0;
+}
+
+/*
+ * Translate a physical EEPROM address to virtual. The first 1K is accessed
+ * through virtual addresses starting at 31K, and the rest is accessed through
+ * virtual addresses starting at 0. This mapping is correct only for PF0.
+ */
+static int eeprom_ptov(unsigned int phys_addr)
+{
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024;
+ return -EINVAL;
+}
+
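Editor's note: a standalone walk through the translation above for a handful of physical addresses; the 16 KB EEPROM size used here is purely illustrative (the real EEPROMSIZE is defined elsewhere in the driver).

#include <stdio.h>

#define EEPROMSIZE_DEMO (16 * 1024)	/* illustrative only */

static int eeprom_ptov_demo(unsigned int phys_addr)
{
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);	/* first 1K lives at 31K */
	if (phys_addr < EEPROMSIZE_DEMO)
		return phys_addr - 1024;	/* the rest starts at 0 */
	return -1;
}

int main(void)
{
	unsigned int addrs[] = { 0, 4, 1023, 1024, 2048 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("phys %4u -> virt %d\n",
		       addrs[i], eeprom_ptov_demo(addrs[i]));
	return 0;
}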
+/*
+ * The next two routines implement eeprom read/write from physical addresses.
+ * The physical->virtual translation is correct only for PF0.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr);
+
+ if (vaddr >= 0)
+ vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr);
+
+ if (vaddr >= 0)
+ vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ int i, err = 0;
+ struct adapter *adapter = netdev2adap(dev);
+
+ u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ memcpy(data, buf + e->offset, e->len);
+ kfree(buf);
+ return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+ struct adapter *adapter = netdev2adap(dev);
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ /*
+ * RMW possibly needed for first or last words.
+ */
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else
+ buf = data;
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err;
+}
+
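Editor's note: set_eeprom() above widens the caller's byte range to whole 32-bit words before the read-modify-write. A worked example of that alignment arithmetic, with an arbitrary offset and length:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 5, len = 6;	/* hypothetical byte range 5..10 */
	unsigned int aligned_offset = offset & ~3;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3;

	/* bytes 5..10 become two whole words covering bytes 4..11 */
	printf("offset %u len %u -> aligned offset %u len %u\n",
	       offset, len, aligned_offset, aligned_len);
	return 0;
}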
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+ int ret;
+ const struct firmware *fw;
+ struct adapter *adap = netdev2adap(netdev);
+
+ ef->data[sizeof(ef->data) - 1] = '\0';
+ ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+ if (!ret)
+ dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
+ return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = netdev2adap(dev)->wol;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ int err = 0;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (wol->wolopts & ~WOL_SUPPORTED)
+ return -EINVAL;
+ t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+ (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+ if (wol->wolopts & WAKE_BCAST) {
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+ ~0ULL, 0, false);
+ if (!err)
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+ ~6ULL, ~0ULL, BCAST_CRC, true);
+ } else
+ t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+ return err;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+ if (value)
+ dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ return 0;
+}
+
+static struct ethtool_ops cxgb_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_rx_csum = get_rx_csum,
+ .set_rx_csum = set_rx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .phys_id = identify_port,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .set_tso = set_tso,
+ .flash_device = set_flash,
+};
+
+/*
+ * debugfs support
+ */
+
+static int mem_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct adapter *adap = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = t4_mc_read(adap, pos, data, NULL);
+ else
+ ret = t4_edc_read(adap, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mem_open,
+ .read = mem_read,
+};
+
+static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+ (void *)adap + idx, &mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
+
+static int __devinit setup_debugfs(struct adapter *adap)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(adap->debugfs_root))
+ return -1;
+
+ i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (adap->l2t)
+ debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
+ &t4_l2t_fops);
+ return 0;
+}
+
+/*
+ * upper-layer driver support
+ */
+
+/*
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgb4_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_atid);
+
+/*
+ * Release an active-open TID.
+ */
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_atid);
+
+/*
+ * Allocate a server TID and set it to the supplied value.
+ */
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+ if (stid < t->nstids)
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ if (stid < 0)
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_stid);
+
+/*
+ * Release a server TID.
+ */
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+{
+ stid -= t->stid_base;
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET)
+ __clear_bit(stid, t->stid_bmap);
+ else
+ bitmap_release_region(t->stid_bmap, stid, 2);
+ t->stid_tab[stid].data = NULL;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_stid);
+
+/*
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+ unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+ req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+/*
+ * Queue a TID release request and if necessary schedule a work queue to
+ * process it.
+ */
+void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+ unsigned int tid)
+{
+ void **p = &t->tid_tab[tid];
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ *p = adap->tid_release_head;
+ /* Low 2 bits encode the Tx channel number */
+ adap->tid_release_head = (void **)((uintptr_t)p | chan);
+ if (!adap->tid_release_task_busy) {
+ adap->tid_release_task_busy = true;
+ schedule_work(&adap->tid_release_task);
+ }
+ spin_unlock_bh(&adap->tid_release_lock);
+}
+EXPORT_SYMBOL(cxgb4_queue_tid_release);
+
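Editor's note: the deferred-release list above threads through the tid_tab slots themselves, hiding the Tx channel in the low two bits of each slot pointer (pointer-aligned storage leaves those bits free); process_tid_release_list() below strips the tag back off. A standalone sketch of the same tagging trick, with a hypothetical channel number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	void *tid_tab[8] = { 0 };	/* pointer-aligned slots */
	unsigned int chan = 3;		/* hypothetical Tx channel */
	void **p = &tid_tab[5];

	/* tag the slot pointer with the channel in its low 2 bits */
	void **tagged = (void **)((uintptr_t)p | chan);

	/* recover both the channel and the original slot */
	unsigned int got_chan = (uintptr_t)tagged & 3;
	void **got_p = (void **)((uintptr_t)tagged - got_chan);

	printf("slot %ld, channel %u\n", (long)(got_p - tid_tab), got_chan);
	return 0;
}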
+/*
+ * Process the list of pending TID release requests.
+ */
+static void process_tid_release_list(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, tid_release_task);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ while (adap->tid_release_head) {
+ void **p = adap->tid_release_head;
+ unsigned int chan = (uintptr_t)p & 3;
+ p = (void *)p - chan;
+
+ adap->tid_release_head = *p;
+ *p = NULL;
+ spin_unlock_bh(&adap->tid_release_lock);
+
+ while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL)))
+ schedule_timeout_uninterruptible(1);
+
+ mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+ t4_ofld_send(adap, skb);
+ spin_lock_bh(&adap->tid_release_lock);
+ }
+ adap->tid_release_task_busy = false;
+ spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Release a TID and inform HW. If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+{
+ void *old;
+ struct sk_buff *skb;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ old = t->tid_tab[tid];
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ t->tid_tab[tid] = NULL;
+ mk_tid_release(skb, chan, tid);
+ t4_ofld_send(adap, skb);
+ } else
+ cxgb4_queue_tid_release(t, chan, tid);
+ if (old)
+ atomic_dec(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb4_remove_tid);
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int natids = t->natids;
+
+ size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ t->nstids * sizeof(*t->stid_tab) +
+ BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->tid_tab = t4_alloc_mem(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ t->stids_in_use = 0;
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+
+ /* Setup the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ bitmap_zero(t->stid_bmap, t->nstids);
+ return 0;
+}
+
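Editor's note: tid_init() sizes a single allocation for the TID, ATID and server-TID tables plus the server bitmap, then carves it up with pointer casts. A miniature standalone version of that layout trick, using toy element types and made-up table sizes:

#include <stdio.h>
#include <stdlib.h>

struct toy_aopen { void *data; };
struct toy_serv  { void *data; };

int main(void)
{
	unsigned int ntids = 128, natids = 16, nstids = 8;
	size_t bmap_longs = (nstids + 8 * sizeof(long) - 1) / (8 * sizeof(long));
	size_t size = ntids * sizeof(void *) +
		      natids * sizeof(struct toy_aopen) +
		      nstids * sizeof(struct toy_serv) +
		      bmap_longs * sizeof(long);
	void **tid_tab = calloc(1, size);	/* one zeroed block */
	struct toy_aopen *atid_tab;
	struct toy_serv *stid_tab;
	unsigned long *stid_bmap;

	if (!tid_tab)
		return 1;

	/* carve the block: each table starts where the previous one ends */
	atid_tab = (struct toy_aopen *)&tid_tab[ntids];
	stid_tab = (struct toy_serv *)&atid_tab[natids];
	stid_bmap = (unsigned long *)&stid_tab[nstids];

	printf("one %zu-byte block: bitmap starts %td bytes in\n",
	       size, (char *)stid_bmap - (char *)tid_tab);
	free(tid_tab);
	return 0;
}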
+/**
+ * cxgb4_create_server - create an IP server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IP address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IP server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip = sip;
+ req->peer_ip = htonl(0);
+ chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server);
+
+/**
+ * cxgb4_create_server6 - create an IPv6 server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IPv6 address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IPv6 server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req6 *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+ req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+ req->peer_ip_hi = cpu_to_be64(0);
+ req->peer_ip_lo = cpu_to_be64(0);
+ chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+/**
+ * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+ * @mtus: the HW MTU table
+ * @mtu: the target MTU
+ * @idx: index of selected entry in the MTU table
+ *
+ * Returns the index and the value in the HW MTU table that is closest to
+ * but does not exceed @mtu, unless @mtu is smaller than any value in the
+ * table, in which case that smallest available value is selected.
+ */
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx)
+{
+ unsigned int i = 0;
+
+ while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+ ++i;
+ if (idx)
+ *idx = i;
+ return mtus[i];
+}
+EXPORT_SYMBOL(cxgb4_best_mtu);
+
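Editor's note: a standalone run of the cxgb4_best_mtu() lookup over a made-up MTU table (the real table comes from adap->params.mtus, programmed during initialization). Note the last case: a target smaller than every entry still selects the smallest entry, as the comment above states.

#include <stdio.h>

int main(void)
{
	/* illustrative table only, loosely modeled on common defaults */
	static const unsigned short mtus[] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 4096, 4352, 8192, 9000, 9600, 16384
	};
	unsigned short targets[] = { 576, 1400, 1500, 9001, 64 };
	unsigned int t, i;

	for (t = 0; t < sizeof(targets) / sizeof(targets[0]); t++) {
		i = 0;
		while (i < sizeof(mtus) / sizeof(mtus[0]) - 1 &&
		       mtus[i + 1] <= targets[t])
			++i;
		printf("mtu %5u -> table entry %u (%u)\n",
		       targets[t], i, mtus[i]);
	}
	return 0;
}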
+/**
+ * cxgb4_port_chan - get the HW channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW Tx channel of the given port.
+ */
+unsigned int cxgb4_port_chan(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->tx_chan;
+}
+EXPORT_SYMBOL(cxgb4_port_chan);
+
+/**
+ * cxgb4_port_viid - get the VI id of a port
+ * @dev: the net device for the port
+ *
+ * Return the VI id of the given port.
+ */
+unsigned int cxgb4_port_viid(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->viid;
+}
+EXPORT_SYMBOL(cxgb4_port_viid);
+
+/**
+ * cxgb4_port_idx - get the index of a port
+ * @dev: the net device for the port
+ *
+ * Return the index of the given port.
+ */
+unsigned int cxgb4_port_idx(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->port_id;
+}
+EXPORT_SYMBOL(cxgb4_port_idx);
+
+/**
+ * cxgb4_netdev_by_hwid - return the net device of a HW port
+ * @pdev: identifies the adapter
+ * @id: the HW port id
+ *
+ * Return the net device associated with the interface with the given HW
+ * id.
+ */
+struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
+{
+ const struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap || id >= NCHAN)
+ return NULL;
+ id = adap->chan_map[id];
+ return id < MAX_NPORTS ? adap->port[id] : NULL;
+}
+EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
+
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, v4, v6);
+ spin_unlock(&adap->stats_lock);
+}
+EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+ const unsigned int *pgsz_order)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
+ t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
+ HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
+ HPZ3(pgsz_order[3]));
+}
+EXPORT_SYMBOL(cxgb4_iscsi_init);
+
+static struct pci_driver cxgb4_driver;
+
+static void check_neigh_update(struct neighbour *neigh)
+{
+ const struct device *parent;
+ const struct net_device *netdev = neigh->dev;
+
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ netdev = vlan_dev_real_dev(netdev);
+ parent = netdev->dev.parent;
+ if (parent && parent->driver == &cxgb4_driver.driver)
+ t4_l2t_update(dev_get_drvdata(parent), neigh);
+}
+
+static int netevent_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ check_neigh_update(data);
+ break;
+ case NETEVENT_PMTU_UPDATE:
+ case NETEVENT_REDIRECT:
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool netevent_registered;
+static struct notifier_block cxgb4_netevent_nb = {
+ .notifier_call = netevent_cb
+};
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+ void *handle;
+ struct cxgb4_lld_info lli;
+
+ lli.pdev = adap->pdev;
+ lli.l2t = adap->l2t;
+ lli.tids = &adap->tids;
+ lli.ports = adap->port;
+ lli.vr = &adap->vres;
+ lli.mtus = adap->params.mtus;
+ if (uld == CXGB4_ULD_RDMA) {
+ lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.nrxq = adap->sge.rdmaqs;
+ } else if (uld == CXGB4_ULD_ISCSI) {
+ lli.rxq_ids = adap->sge.ofld_rxq;
+ lli.nrxq = adap->sge.ofldqsets;
+ }
+ lli.ntxq = adap->sge.ofldqsets;
+ lli.nchan = adap->params.nports;
+ lli.nports = adap->params.nports;
+ lli.wr_cred = adap->params.ofldq_wr_cred;
+ lli.adapter_type = adap->params.rev;
+ lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+ lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+ lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+ lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
+ lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+ lli.fw_vers = adap->params.fw_vers;
+
+ handle = ulds[uld].add(&lli);
+ if (IS_ERR(handle)) {
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ uld_str[uld], PTR_ERR(handle));
+ return;
+ }
+
+ adap->uld_handle[uld] = handle;
+
+ if (!netevent_registered) {
+ register_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = true;
+ }
+}
+
+static void attach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adap->list_node, &adapter_list);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (ulds[i].add)
+ uld_attach(adap, i);
+ mutex_unlock(&uld_mutex);
+}
+
+static void detach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_del(&adap->list_node);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i]) {
+ ulds[i].state_change(adap->uld_handle[i],
+ CXGB4_STATE_DETACH);
+ adap->uld_handle[i] = NULL;
+ }
+ if (netevent_registered && list_empty(&adapter_list)) {
+ unregister_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = false;
+ }
+ mutex_unlock(&uld_mutex);
+}
+
+static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i])
+ ulds[i].state_change(adap->uld_handle[i], new_state);
+ mutex_unlock(&uld_mutex);
+}
+
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ if (ulds[type].add) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ulds[type] = *p;
+ list_for_each_entry(adap, &adapter_list, list_node)
+ uld_attach(adap, type);
+out: mutex_unlock(&uld_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
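+
+/*
+ * Illustrative sketch, not part of the driver: a minimal ULD registration.
+ * The my_add, my_rx_handler and my_state_change callbacks are hypothetical;
+ * struct cxgb4_uld_info is defined in cxgb4_uld.h.
+ *
+ *   static const struct cxgb4_uld_info my_uld_info = {
+ *           .name         = "my_uld",
+ *           .add          = my_add,
+ *           .rx_handler   = my_rx_handler,
+ *           .state_change = my_state_change,
+ *   };
+ *
+ *   err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
+ *   ...
+ *   cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
+ */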
+
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node)
+ adap->uld_handle[type] = NULL;
+ ulds[type].add = NULL;
+ mutex_unlock(&uld_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+ int err = 0;
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err) {
+ t4_free_sge_resources(adap);
+ goto out;
+ }
+ if (adap->flags & USING_MSIX)
+ name_msix_vecs(adap);
+ adap->flags |= FULL_INIT_DONE;
+ }
+
+ if (adap->flags & USING_MSIX) {
+ err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+ adap->msix_info[0].desc, adap);
+ if (err)
+ goto irq_err;
+
+ err = request_msix_queue_irqs(adap);
+ if (err) {
+ free_irq(adap->msix_info[0].vec, adap);
+ goto irq_err;
+ }
+ } else {
+ err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->name, adap);
+ if (err)
+ goto irq_err;
+ }
+ enable_rx(adap);
+ t4_sge_start(adap);
+ t4_intr_enable(adap);
+ notify_ulds(adap, CXGB4_STATE_UP);
+ out:
+ return err;
+ irq_err:
+ dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ goto out;
+}
+
+static void cxgb_down(struct adapter *adapter)
+{
+ t4_intr_disable(adapter);
+ cancel_work_sync(&adapter->tid_release_task);
+ adapter->tid_release_task_busy = false;
+
+ if (adapter->flags & USING_MSIX) {
+ free_msix_queue_irqs(adapter);
+ free_irq(adapter->msix_info[0].vec, adapter);
+ } else
+ free_irq(adapter->pdev->irq, adapter);
+ quiesce_rx(adapter);
+}
+
+/*
+ * net_device operations
+ */
+static int cxgb_open(struct net_device *dev)
+{
+ int err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+ return err;
+
+ dev->real_num_tx_queues = pi->nqsets;
+ set_bit(pi->tx_chan, &adapter->open_device_map);
+ link_start(dev);
+ netif_tx_start_all_queues(dev);
+ return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
+
+ clear_bit(pi->tx_chan, &adapter->open_device_map);
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter);
+ return 0;
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+ struct port_stats stats;
+ struct port_info *p = netdev_priv(dev);
+ struct adapter *adapter = p->adapter;
+ struct net_device_stats *ns = &dev->stats;
+
+ spin_lock(&adapter->stats_lock);
+ t4_get_port_stats(adapter, p->tx_chan, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ ns->tx_bytes = stats.tx_octets;
+ ns->tx_packets = stats.tx_frames;
+ ns->rx_bytes = stats.rx_octets;
+ ns->rx_packets = stats.rx_frames;
+ ns->multicast = stats.rx_mcast_frames;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+ stats.rx_runt;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = stats.rx_fcs_err;
+ ns->rx_frame_errors = stats.rx_symbol_err;
+ ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
+ stats.rx_ovflow2 + stats.rx_ovflow3 +
+ stats.rx_trunc0 + stats.rx_trunc1 +
+ stats.rx_trunc2 + stats.rx_trunc3;
+ ns->rx_missed_errors = 0;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = 0;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = 0;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = 0;
+
+ ns->tx_errors = stats.tx_error_frames;
+ ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+ ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+ return ns;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ int ret = 0, prtad, devad;
+ struct port_info *pi = netdev_priv(dev);
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ if (pi->mdio_addr < 0)
+ return -EOPNOTSUPP;
+ data->phy_id = pi->mdio_addr;
+ break;
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (mdio_phy_id_is_c45(data->phy_id)) {
+ prtad = mdio_phy_id_prtad(data->phy_id);
+ devad = mdio_phy_id_devad(data->phy_id);
+ } else if (data->phy_id < 32) {
+ prtad = data->phy_id;
+ devad = 0;
+ data->reg_num &= 0x1f;
+ } else
+ return -EINVAL;
+
+ if (cmd == SIOCGMIIREG)
+ ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
+ data->reg_num, &data->val_out);
+ else
+ ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
+ data->reg_num, data->val_in);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+ /* unfortunately we can't return errors to the stack */
+ set_rxmode(dev, -1, false);
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
+ return -EINVAL;
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+ true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+ struct sockaddr *addr = p;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
+ addr->sa_data, true, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ pi->vlan_grp = grp;
+ t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (adap->flags & USING_MSIX) {
+ int i;
+ struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = pi->nqsets; i; i--, rx++)
+ t4_sge_intr_msix(0, &rx->rspq);
+ } else
+ t4_intr_handler(adap)(0, adap);
+}
+#endif
+
+static const struct net_device_ops cxgb4_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t4_eth_xmit,
+ .ndo_get_stats = cxgb_get_stats,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_change_mtu = cxgb_change_mtu,
+ .ndo_vlan_rx_register = vlan_rx_register,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+#endif
+};
+
+void t4_fatal_err(struct adapter *adap)
+{
+ t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+ t4_intr_disable(adap);
+ dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 bar0;
+
+ bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
+ (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
+ (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
+ (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ */
+static int adap_init0(struct adapter *adap)
+{
+ int ret;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ struct fw_caps_config_cmd c;
+
+ ret = t4_check_fw_version(adap);
+ if (ret == -EINVAL || ret > 0) {
+ if (upgrade_fw(adap) >= 0) /* recache FW version */
+ ret = t4_check_fw_version(adap);
+ }
+ if (ret < 0)
+ return ret;
+
+ /* contact FW, request master */
+ ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+ ret);
+ return ret;
+ }
+
+ /* reset device */
+ ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
+ if (ret < 0)
+ goto bye;
+
+ /* get device capabilities */
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c.retval_len16 = htonl(FW_LEN16(c));
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+ if (ret < 0)
+ goto bye;
+
+ /* select capabilities we'll be using */
+ if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+ goto bye;
+ }
+ c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
+ if (ret < 0)
+ goto bye;
+
+ ret = t4_config_glbl_rss(adap, 0,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (ret < 0)
+ goto bye;
+
+ ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
+ FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (ret < 0)
+ goto bye;
+
+ for (v = 0; v < SGE_NTIMERS - 1; v++)
+ adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
+ adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+ adap->sge.counter_val[0] = 1;
+ for (v = 1; v < SGE_NCOUNTERS; v++)
+ adap->sge.counter_val[v] = min(intr_cnt[v - 1],
+ THRESHOLD_3_MASK);
+ t4_sge_init(adap);
+
+ /* get basic stuff going */
+ ret = t4_early_init(adap, 0);
+ if (ret < 0)
+ goto bye;
+
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+
+ params[0] = FW_PARAM_DEV(PORTVEC);
+ params[1] = FW_PARAM_PFVF(L2T_START);
+ params[2] = FW_PARAM_PFVF(L2T_END);
+ params[3] = FW_PARAM_PFVF(FILTER_START);
+ params[4] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
+ if (ret < 0)
+ goto bye;
+ port_vec = val[0];
+ adap->tids.ftid_base = val[3];
+ adap->tids.nftids = val[4] - val[3] + 1;
+
+ if (c.ofldcaps) {
+ /* query offload-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ params[1] = FW_PARAM_PFVF(SERVER_START);
+ params[2] = FW_PARAM_PFVF(SERVER_END);
+ params[3] = FW_PARAM_PFVF(TDDP_START);
+ params[4] = FW_PARAM_PFVF(TDDP_END);
+ params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+ ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+ adap->tids.stid_base = val[1];
+ adap->tids.nstids = val[2] - val[1] + 1;
+ adap->vres.ddp.start = val[3];
+ adap->vres.ddp.size = val[4] - val[3] + 1;
+ adap->params.ofldq_wr_cred = val[5];
+ adap->params.offload = 1;
+ }
+ if (c.rdmacaps) {
+ params[0] = FW_PARAM_PFVF(STAG_START);
+ params[1] = FW_PARAM_PFVF(STAG_END);
+ params[2] = FW_PARAM_PFVF(RQ_START);
+ params[3] = FW_PARAM_PFVF(RQ_END);
+ params[4] = FW_PARAM_PFVF(PBL_START);
+ params[5] = FW_PARAM_PFVF(PBL_END);
+ ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.stag.start = val[0];
+ adap->vres.stag.size = val[1] - val[0] + 1;
+ adap->vres.rq.start = val[2];
+ adap->vres.rq.size = val[3] - val[2] + 1;
+ adap->vres.pbl.start = val[4];
+ adap->vres.pbl.size = val[5] - val[4] + 1;
+ }
+ if (c.iscsicaps) {
+ params[0] = FW_PARAM_PFVF(ISCSI_START);
+ params[1] = FW_PARAM_PFVF(ISCSI_END);
+ ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.iscsi.start = val[0];
+ adap->vres.iscsi.size = val[1] - val[0] + 1;
+ }
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+ adap->flags |= FW_OK;
+
+ /* These are finalized by FW initialization; load their values now */
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+
+ /* tweak some settings */
+ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+ v = t4_read_reg(adap, TP_PIO_DATA);
+ t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ setup_memwin(adap);
+ return 0;
+
+ /*
+ * If a command timed out or failed with EIO, the FW is either not operating
+ * within its spec or something catastrophic happened to the HW/FW; in either
+ * case stop issuing commands.
+ */
+bye: if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, 0);
+ return ret;
+}
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+}
+
+static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->intr_params = QINTR_TIMER_IDX(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
+ q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and type
+ * of ports we found and the number of available CPUs. Most settings can be
+ * modified by the admin prior to actual use.
+ */
+static void __devinit cfg_queues(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int i, q10g = 0, n10g = 0, qidx = 0;
+
+ for_each_port(adap, i)
+ n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
+ /*
+ * We default to 1 queue per non-10G port and, for each 10G port, up to
+ * as many queues as there are CPU cores.
+ */
+ if (n10g)
+ q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ if (q10g > num_online_cpus())
+ q10g = num_online_cpus();
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+ pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ qidx += pi->nqsets;
+ }
+
+ s->ethqsets = qidx;
+ s->max_ethqsets = qidx; /* MSI-X may lower it later */
+
+ if (is_offload(adap)) {
+ /*
+ * For offload we use 1 queue per channel if all ports are at most 1G;
+ * otherwise we divide the available queues, capped by the number of
+ * online CPUs, evenly amongst the channels.
+ */
+ if (n10g) {
+ i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ num_online_cpus());
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else
+ s->ofldqsets = adap->params.nports;
+ /* For RDMA one Rx queue per channel suffices */
+ s->rdmaqs = adap->params.nports;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+ s->ctrlq[i].q.size = 512;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+ s->ofldtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+ struct sge_ofld_rxq *r = &s->ofldrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->rspq.uld = CXGB4_ULD_ISCSI;
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmarxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 511, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ r->fl.size = 72;
+ }
+
+ init_rspq(&s->fw_evtq, 6, 0, 512, 64);
+ init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+}
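+
+/*
+ * Worked example of the arithmetic in cfg_queues(), with numbers chosen only
+ * for illustration (MAX_ETH_QSETS is defined elsewhere in the driver; assume
+ * 32 here, with 8 online CPUs): an adapter with two 10G and two 1G ports has
+ * n10g = 2, so q10g = (32 - 2) / 2 = 15, capped to 8 by num_online_cpus().
+ * Each 10G port then gets 8 queue sets and each 1G port gets 1, for
+ * s->ethqsets = 18.
+ */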
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * The caller guarantees that n is large enough to provide at least one
+ * queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adap, int n)
+{
+ int i;
+ struct port_info *pi;
+
+ while (n < adap->sge.ethqsets)
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets > 1) {
+ pi->nqsets--;
+ adap->sge.ethqsets--;
+ if (adap->sge.ethqsets <= n)
+ break;
+ }
+ }
+
+ n = 0;
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ pi->first_qset = n;
+ n += pi->nqsets;
+ }
+}
+
+/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+#define EXTRA_VECS 2
+
+static int __devinit enable_msix(struct adapter *adap)
+{
+ int ofld_need = 0;
+ int i, err, want, need;
+ struct sge *s = &adap->sge;
+ unsigned int nchan = adap->params.nports;
+ struct msix_entry entries[MAX_INGQ + 1];
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ want = s->max_ethqsets + EXTRA_VECS;
+ if (is_offload(adap)) {
+ want += s->rdmaqs + s->ofldqsets;
+ /* need nchan for each possible ULD */
+ ofld_need = 2 * nchan;
+ }
+ need = adap->params.nports + EXTRA_VECS + ofld_need;
+
+ while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
+ want = err;
+
+ if (!err) {
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ } else if (err > 0)
+ dev_info(adap->pdev_dev,
+ "only %d MSI-X vectors left, not using MSI-X\n", err);
+ return err;
+}
+
+#undef EXTRA_VECS
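+
+/*
+ * Worked example of the MSI-X accounting in enable_msix(), with numbers
+ * chosen only for illustration: a 2-port offload-capable adapter with
+ * max_ethqsets = 16, rdmaqs = 2 and ofldqsets = 8 requests
+ * want = 16 + 2 + 2 + 8 = 28 vectors but can operate with as few as
+ * need = 2 + 2 + 2 * 2 = 8; anything in between is redistributed, with the
+ * NIC queues taking priority over the offload queues.
+ */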
+
+static void __devinit print_port_info(struct adapter *adap)
+{
+ static const char *base[] = {
+ "R", "KX4", "T", "KX", "T", "KR", "CX4"
+ };
+
+ int i;
+ char buf[80];
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ const struct port_info *pi = netdev_priv(dev);
+ char *bufp = buf;
+
+ if (!test_bit(i, &adap->registered_device_map))
+ continue;
+
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s", base[pi->port_type]);
+
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+ adap->params.vpd.id, adap->params.rev,
+ buf, is_offload(adap) ? "R" : "",
+ adap->params.pci.width,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ if (adap->name == dev->name)
+ netdev_info(dev, "S/N: %s, E/C: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.ec);
+ }
+}
+
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+
+static int __devinit init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int func, i, err;
+ struct port_info *pi;
+ unsigned int highdma = 0;
+ struct adapter *adapter = NULL;
+
+ printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ /* Just info, some other driver may have claimed the device. */
+ dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ return err;
+ }
+
+ /* We control everything through PF 0 */
+ func = PCI_FUNC(pdev->devfn);
+ if (func > 0)
+ goto sriov;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto out_release_regions;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ highdma = NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+ "coherent allocations\n");
+ goto out_disable_device;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_disable_device;
+ }
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tid_release_lock);
+
+ INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ goto out_unmap_bar;
+ err = adap_init0(adapter);
+ if (err)
+ goto out_unmap_bar;
+
+ for_each_port(adapter, i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct port_info),
+ MAX_ETH_QSETS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter->port[i] = netdev;
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->rx_offload = RX_CSO;
+ pi->port_id = i;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ netdev->irq = pdev->irq;
+
+ netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_GRO | highdma;
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+ netdev->netdev_ops = &cxgb4_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ }
+
+ pci_set_drvdata(pdev, adapter);
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, 0, 0, 0);
+ if (err)
+ goto out_free_dev;
+ }
+
+ /*
+ * Configure queues and allocate tables now; they can be needed as
+ * soon as the first register_netdev completes.
+ */
+ cfg_queues(adapter);
+
+ adapter->l2t = t4_init_l2t();
+ if (!adapter->l2t) {
+ /* We tolerate a lack of L2T, giving up some functionality */
+ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+ dev_warn(&pdev->dev, "could not allocate TID table, "
+ "continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ /*
+ * The card is now ready to go. If any errors occur during device
+ * registration we do not fail the whole card but rather proceed only
+ * with the ports we manage to register successfully. However we must
+ * register at least one net device.
+ */
+ for_each_port(adapter, i) {
+ err = register_netdev(adapter->port[i]);
+ if (err)
+ dev_warn(&pdev->dev,
+ "cannot register net device %s, skipping\n",
+ adapter->port[i]->name);
+ else {
+ /*
+ * Change the name we use for messages to the name of
+ * the first successfully registered interface.
+ */
+ if (!adapter->registered_device_map)
+ adapter->name = adapter->port[i]->name;
+
+ __set_bit(i, &adapter->registered_device_map);
+ adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+ }
+ }
+ if (!adapter->registered_device_map) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto out_free_dev;
+ }
+
+ if (cxgb4_debugfs_root) {
+ adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+ cxgb4_debugfs_root);
+ setup_debugfs(adapter);
+ }
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ adapter->flags |= USING_MSI;
+
+ if (is_offload(adapter))
+ attach_ulds(adapter);
+
+ print_port_info(adapter);
+
+sriov:
+#ifdef CONFIG_PCI_IOV
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+ dev_info(&pdev->dev,
+ "instantiated %u virtual functions\n",
+ num_vf[func]);
+#endif
+ return 0;
+
+ out_free_dev:
+ t4_free_mem(adapter->tids.tid_tab);
+ t4_free_mem(adapter->l2t);
+ for_each_port(adapter, i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, 0);
+ out_unmap_bar:
+ iounmap(adapter->regs);
+ out_free_adapter:
+ kfree(adapter);
+ out_disable_device:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ out_release_regions:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_disable_sriov(pdev);
+
+ if (adapter) {
+ int i;
+
+ if (is_offload(adapter))
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (test_bit(i, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[i]);
+
+ if (adapter->debugfs_root)
+ debugfs_remove_recursive(adapter->debugfs_root);
+
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ t4_free_mem(adapter->l2t);
+ t4_free_mem(adapter->tids.tid_tab);
+ disable_msi(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, 0);
+ iounmap(adapter->regs);
+ kfree(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ } else if (PCI_FUNC(pdev->devfn) > 0)
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver cxgb4_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxgb4_pci_tbl,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+};
+
+static int __init cxgb4_init_module(void)
+{
+ int ret;
+
+ /* Debugfs support is optional; just warn if this fails */
+ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cxgb4_debugfs_root)
+ pr_warning("could not create debugfs entry, continuing\n");
+
+ ret = pci_register_driver(&cxgb4_driver);
+ if (ret < 0)
+ debugfs_remove(cxgb4_debugfs_root);
+ return ret;
+}
+
+static void __exit cxgb4_cleanup_module(void)
+{
+ pci_unregister_driver(&cxgb4_driver);
+ debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
+}
+
+module_init(cxgb4_init_module);
+module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 000000000000..5b98546ac92d
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_OFLD_H
+#define __CXGB4_OFLD_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <asm/atomic.h>
+
+/* CPL message priority levels */
+enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* control messages */
+};
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
+ FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
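+
+/*
+ * Illustrative sketch, not part of the driver: typical use of the macros
+ * above when a ULD builds a work request carrying a CPL message.  The
+ * struct my_cpl_req type and MY_CPL opcode are hypothetical.
+ *
+ *   struct my_cpl_req *req;
+ *
+ *   req = (struct my_cpl_req *)__skb_put(skb, sizeof(*req));
+ *   INIT_TP_WR_CPL(req, MY_CPL, tid);
+ *
+ * The remaining request fields are then filled in and the skb handed to
+ * cxgb4_ofld_send() or cxgb4_l2t_send().
+ */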
+
+/* Special asynchronous notification message */
+#define CXGB4_MSG_AN ((void *)1)
+
+struct serv_entry {
+ void *data;
+};
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of the TID, server TID,
+ * and active-open TID tables. The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+
+ struct serv_entry *stid_tab;
+ unsigned long *stid_bmap;
+ unsigned int nstids;
+ unsigned int stid_base;
+
+ union aopen_entry *atid_tab;
+ unsigned int natids;
+
+ unsigned int nftids;
+ unsigned int ftid_base;
+
+ spinlock_t atid_lock ____cacheline_aligned_in_smp;
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ spinlock_t stid_lock;
+ unsigned int stids_in_use;
+
+ atomic_t tids_in_use;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
+{
+ stid -= t->stid_base;
+ return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+}
+
+static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
+ unsigned int tid)
+{
+ t->tid_tab[tid] = data;
+ atomic_inc(&t->tids_in_use);
+}
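+
+/*
+ * Illustrative sketch, not part of the driver: a ULD typically stores its
+ * per-connection state under the connection's TID and looks it up again
+ * when a CPL message for that TID arrives.  The cs pointer and tid value
+ * are hypothetical.
+ *
+ *   cxgb4_insert_tid(lld->tids, cs, tid);   (at connection setup)
+ *   ...
+ *   cs = lookup_tid(lld->tids, tid);        (when a CPL arrives)
+ */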
+
+int cxgb4_alloc_atid(struct tid_info *t, void *data);
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
+void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+ unsigned int tid);
+
+struct in6_addr;
+
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue);
+
+static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
+{
+ skb_set_queue_mapping(skb, (queue << 1) | prio);
+}
+
+enum cxgb4_uld {
+ CXGB4_ULD_RDMA,
+ CXGB4_ULD_ISCSI,
+ CXGB4_ULD_MAX
+};
+
+enum cxgb4_state {
+ CXGB4_STATE_UP,
+ CXGB4_STATE_START_RECOVERY,
+ CXGB4_STATE_DOWN,
+ CXGB4_STATE_DETACH
+};
+
+struct pci_dev;
+struct l2t_data;
+struct net_device;
+struct pkt_gl;
+struct tp_tcp_stats;
+
+struct cxgb4_range {
+ unsigned int start;
+ unsigned int size;
+};
+
+struct cxgb4_virt_res { /* virtualized HW resources */
+ struct cxgb4_range ddp;
+ struct cxgb4_range iscsi;
+ struct cxgb4_range stag;
+ struct cxgb4_range rq;
+ struct cxgb4_range pbl;
+};
+
+/*
+ * Block of information the LLD provides to ULDs attaching to a device.
+ */
+struct cxgb4_lld_info {
+ struct pci_dev *pdev; /* associated PCI device */
+ struct l2t_data *l2t; /* L2 table */
+ struct tid_info *tids; /* TID table */
+ struct net_device **ports; /* device ports */
+ const struct cxgb4_virt_res *vr; /* assorted HW resources */
+ const unsigned short *mtus; /* MTU table */
+ const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
+ unsigned short nrxq; /* # of Rx queues */
+ unsigned short ntxq; /* # of Tx queues */
+ unsigned char nchan:4; /* # of channels */
+ unsigned char nports:4; /* # of ports */
+ unsigned char wr_cred; /* WR 16-byte credits */
+ unsigned char adapter_type; /* type of adapter */
+ unsigned char fw_api_ver; /* FW API version */
+ unsigned int fw_vers; /* FW version */
+ unsigned int iscsi_iolen; /* iSCSI max I/O length */
+ unsigned short udb_density; /* # of user DB/page */
+ unsigned short ucq_density; /* # of user CQs/page */
+ void __iomem *gts_reg; /* address of GTS register */
+ void __iomem *db_reg; /* address of kernel doorbell */
+};
+
+struct cxgb4_uld_info {
+ const char *name;
+ void *(*add)(const struct cxgb4_lld_info *p);
+ int (*rx_handler)(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl);
+ int (*state_change)(void *handle, enum cxgb4_state new_state);
+};
+
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+int cxgb4_unregister_uld(enum cxgb4_uld type);
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_viid(const struct net_device *dev);
+unsigned int cxgb4_port_idx(const struct net_device *dev);
+struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx);
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6);
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+ const unsigned int *pgsz_order);
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len);
+#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 000000000000..9f96724a133a
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <net/neighbour.h>
+#include "cxgb4.h"
+#include "l2t.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+#define VLAN_NONE 0xfff
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define F_SYNC_WR (1 << 12)
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+struct l2t_data {
+ rwlock_t lock;
+ atomic_t nfree; /* number of free entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ struct l2t_entry l2tab[L2T_SIZE];
+};
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+ if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ atomic_dec(&d->nfree);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain. We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.
+ */
+enum {
+ L2T_SZ_HALF = L2T_SIZE / 2,
+ L2T_HASH_MASK = L2T_SZ_HALF - 1
+};
+
+static inline unsigned int arp_hash(const u32 *key, int ifindex)
+{
+ return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+}
+
+static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+{
+ u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+ return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+}
+
+static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+{
+ return addr_len == 4 ? arp_hash(addr, ifindex) :
+ ipv6_hash(addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above. Returns 0 if there's a match, non-zero otherwise.
+ */
+static int addreq(const struct l2t_entry *e, const u32 *addr)
+{
+ if (e->v6)
+ return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+ (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+ return e->addr[0] ^ addr[0];
+}
+
+static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+ neigh_hold(n);
+ if (e->neigh)
+ neigh_release(e->neigh);
+ e->neigh = n;
+}
+
+/*
+ * Write an L2T entry. Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
+{
+ struct sk_buff *skb;
+ struct cpl_l2t_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+ e->idx | (sync ? F_SYNC_WR : 0) |
+ TID_QID(adap->sge.fw_evtq.abs_id)));
+ req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+ req->l2t_idx = htons(e->idx);
+ req->vlan = htons(e->vlan);
+ if (e->neigh)
+ memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+ t4_ofld_send(adap, skb);
+
+ if (sync && e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_SYNC_WRITE;
+ return 0;
+}
+
+/*
+ * Send packets waiting in an L2T entry's ARP queue. Must be called with the
+ * entry locked.
+ */
+static void send_pending(struct adapter *adap, struct l2t_entry *e)
+{
+ while (e->arpq_head) {
+ struct sk_buff *skb = e->arpq_head;
+
+ e->arpq_head = skb->next;
+ skb->next = NULL;
+ t4_ofld_send(adap, skb);
+ }
+ e->arpq_tail = NULL;
+}
+
+/*
+ * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a
+ * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T
+ * index it refers to.
+ */
+void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ unsigned int idx = tid & (L2T_SIZE - 1);
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ dev_err(adap->pdev_dev,
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, idx);
+ return;
+ }
+
+ if (tid & F_SYNC_WR) {
+ struct l2t_entry *e = &adap->l2t->l2tab[idx];
+
+ spin_lock(&e->lock);
+ if (e->state != L2T_STATE_SWITCHING) {
+ send_pending(adap, e);
+ e->state = (e->neigh->nud_state & NUD_STALE) ?
+ L2T_STATE_STALE : L2T_STATE_VALID;
+ }
+ spin_unlock(&e->lock);
+ }
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+ skb->next = NULL;
+ if (e->arpq_head)
+ e->arpq_tail->next = skb;
+ else
+ e->arpq_head = skb;
+ e->arpq_tail = skb;
+}
+
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
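+ /* fall through */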
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return t4_ofld_send(adap, skb);
+ case L2T_STATE_RESOLVING:
+ case L2T_STATE_SYNC_WRITE:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_SYNC_WRITE &&
+ e->state != L2T_STATE_RESOLVING) {
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ arpq_enqueue(e, skb);
+ spin_unlock_bh(&e->lock);
+
+ if (e->state == L2T_STATE_RESOLVING &&
+ !neigh_event_send(e->neigh, NULL)) {
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
+ write_l2e(adap, e, 1);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_l2t_send);
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ if (!atomic_read(&d->nfree))
+ return NULL;
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ if (atomic_read(&e->refcnt) == 0)
+ goto found;
+
+ for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
+ ;
+found:
+ d->rover = e + 1;
+ atomic_dec(&d->nfree);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state < L2T_STATE_SWITCHING)
+ for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ e->next = NULL;
+ break;
+ }
+
+ e->state = L2T_STATE_UNUSED;
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.
+ */
+static void t4_l2e_free(struct l2t_entry *e)
+{
+ struct l2t_data *d;
+
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+
+ d = container_of(e, struct l2t_data, l2tab[e->idx]);
+ atomic_inc(&d->nfree);
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e)
+{
+ if (atomic_dec_and_test(&e->refcnt))
+ t4_l2e_free(e);
+}
+EXPORT_SYMBOL(cxgb4_l2t_release);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+ unsigned int nud_state;
+
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ nud_state = neigh->nud_state;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+ !(nud_state & NUD_VALID))
+ e->state = L2T_STATE_RESOLVING;
+ else if (nud_state & NUD_CONNECTED)
+ e->state = L2T_STATE_VALID;
+ else
+ e->state = L2T_STATE_STALE;
+ spin_unlock(&e->lock);
+}
+
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+ const struct net_device *physdev,
+ unsigned int priority)
+{
+ u8 lport;
+ u16 vlan;
+ struct l2t_entry *e;
+ int addr_len = neigh->tbl->key_len;
+ u32 *addr = (u32 *)neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = addr_hash(addr, addr_len, ifidx);
+
+ if (neigh->dev->flags & IFF_LOOPBACK)
+ lport = netdev2pinfo(physdev)->tx_chan + 4;
+ else
+ lport = netdev2pinfo(physdev)->lport;
+
+ if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ vlan = vlan_dev_vlan_id(neigh->dev);
+ else
+ vlan = VLAN_NONE;
+
+ write_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (!addreq(e, addr) && e->ifindex == ifidx &&
+ e->vlan == vlan && e->lport == lport) {
+ l2t_hold(d, e);
+ if (atomic_read(&e->refcnt) == 1)
+ reuse_entry(e, neigh);
+ goto done;
+ }
+
+ /* Need to allocate a new entry */
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_RESOLVING;
+ memcpy(e->addr, addr, addr_len);
+ e->ifindex = ifidx;
+ e->hash = hash;
+ e->lport = lport;
+ e->v6 = addr_len == 16;
+ atomic_set(&e->refcnt, 1);
+ neigh_replace(e, neigh);
+ e->vlan = vlan;
+ e->next = d->l2tab[hash].first;
+ d->l2tab[hash].first = e;
+ spin_unlock(&e->lock);
+ }
+done:
+ write_unlock_bh(&d->lock);
+ return e;
+}
+EXPORT_SYMBOL(cxgb4_l2t_get);
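+
+/*
+ * Illustrative sketch, not part of the driver: the usual L2T life cycle for
+ * an offloaded flow.  The lld, neigh, dev and skb variables are assumed to
+ * be set up by the caller; dev is the physical port's net device.
+ *
+ *   struct l2t_entry *e = cxgb4_l2t_get(lld->l2t, neigh, dev, 0);
+ *
+ *   if (e) {
+ *           cxgb4_l2t_send(dev, skb, e);   (queued if still resolving)
+ *           ...
+ *           cxgb4_l2t_release(e);          (drop the reference when done)
+ *   }
+ */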
+
+/*
+ * Called when address resolution fails for an L2T entry, to handle the
+ * packets queued on the entry's ARP queue. If a packet specifies a failure
+ * handler it is invoked, otherwise the packet is sent to the device.
+ */
+static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
+{
+ while (arpq) {
+ struct sk_buff *skb = arpq;
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ arpq = skb->next;
+ skb->next = NULL;
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ }
+}
+
+/*
+ * Called when the host's neighbor layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+{
+ struct l2t_entry *e;
+ struct sk_buff *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ int addr_len = neigh->tbl->key_len;
+ u32 *addr = (u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = addr_hash(addr, addr_len, ifidx);
+
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (!addreq(e, addr) && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ if (atomic_read(&e->refcnt))
+ goto found;
+ spin_unlock(&e->lock);
+ break;
+ }
+ read_unlock_bh(&d->lock);
+ return;
+
+ found:
+ read_unlock(&d->lock);
+
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ arpq = e->arpq_head;
+ e->arpq_head = e->arpq_tail = NULL;
+ } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
+ e->arpq_head) {
+ write_l2e(adap, e, 1);
+ }
+ } else {
+ e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+ write_l2e(adap, e, 0);
+ }
+
+ spin_unlock_bh(&e->lock);
+
+ if (arpq)
+ handle_failed_resolution(adap, arpq);
+}
+
+/*
+ * Allocate an L2T entry for use by a switching rule. Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ write_lock_bh(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_set(&e->refcnt, 1);
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&d->lock);
+ return e;
+}
+
+/*
+ * Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETH_ALEN);
+ return write_l2e(adap, e, 0);
+}
+
+struct l2t_data *t4_init_l2t(void)
+{
+ int i;
+ struct l2t_data *d;
+
+ d = t4_alloc_mem(sizeof(*d));
+ if (!d)
+ return NULL;
+
+ d->rover = d->l2tab;
+ atomic_set(&d->nfree, L2T_SIZE);
+ rwlock_init(&d->lock);
+
+ for (i = 0; i < L2T_SIZE; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ spin_lock_init(&d->l2tab[i].lock);
+ atomic_set(&d->l2tab[i].refcnt, 0);
+ }
+ return d;
+}
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct l2t_entry *l2tab = seq->private;
+
+ return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+}
+
+static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ v = l2t_get_idx(seq, *pos);
+ if (v)
+ ++*pos;
+ return v;
+}
+
+static void l2t_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static char l2e_state(const struct l2t_entry *e)
+{
+ switch (e->state) {
+ case L2T_STATE_VALID: return 'V';
+ case L2T_STATE_STALE: return 'S';
+ case L2T_STATE_SYNC_WRITE: return 'W';
+ case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+ case L2T_STATE_SWITCHING: return 'X';
+ default:
+ return 'U';
+ }
+}
+
+static int l2t_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, " Idx IP address "
+ "Ethernet address VLAN/P LP State Users Port\n");
+ else {
+ char ip[60];
+ struct l2t_entry *e = v;
+
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_SWITCHING)
+ ip[0] = '\0';
+ else
+ sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
+ seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
+ e->idx, ip, e->dmac,
+ e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
+ l2e_state(e), atomic_read(&e->refcnt),
+ e->neigh ? e->neigh->dev->name : "");
+ spin_unlock_bh(&e->lock);
+ }
+ return 0;
+}
+
+static const struct seq_operations l2t_seq_ops = {
+ .start = l2t_seq_start,
+ .next = l2t_seq_next,
+ .stop = l2t_seq_stop,
+ .show = l2t_seq_show
+};
+
+static int l2t_seq_open(struct inode *inode, struct file *file)
+{
+ int rc = seq_open(file, &l2t_seq_ops);
+
+ if (!rc) {
+ struct adapter *adap = inode->i_private;
+ struct seq_file *seq = file->private_data;
+
+ seq->private = adap->l2t->l2tab;
+ }
+ return rc;
+}
+
+const struct file_operations t4_l2t_fops = {
+ .owner = THIS_MODULE,
+ .open = l2t_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 000000000000..643f27ed3cf4
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_L2T_H
+#define __CXGB4_L2T_H
+
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <asm/atomic.h>
+
+struct adapter;
+struct l2t_data;
+struct neighbour;
+struct net_device;
+struct file_operations;
+struct cpl_l2t_write_rpl;
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr[4]; /* next hop IP or IPv6 address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
+ struct sk_buff *arpq_tail;
+ spinlock_t lock;
+ atomic_t refcnt; /* entry reference count */
+ u16 hash; /* hash bucket the entry is on */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ u8 v6; /* whether entry is for IPv6 */
+ u8 lport; /* associated offload logical interface */
+ u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
+};
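+
+/*
+ * Illustrative, IPv4-only sketch (hypothetical helper, for exposition only;
+ * the real lookup logic lives in l2t.c) of how such a chain is walked: start
+ * at a bucket's first pointer and follow next until the address, interface
+ * and VLAN all match.
+ */
+static inline struct l2t_entry *example_find_l2e(struct l2t_entry *bucket,
+                                                 u32 addr, int ifindex,
+                                                 u16 vlan)
+{
+        struct l2t_entry *e;
+
+        for (e = bucket->first; e; e = e->next)
+                if (!e->v6 && e->addr[0] == addr &&
+                    e->ifindex == ifindex && e->vlan == vlan)
+                        return e;
+        return NULL;
+}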
+
+typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+ void *handle;
+ arp_err_handler_t arp_err_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
+ arp_err_handler_t handler)
+{
+ L2T_SKB_CB(skb)->handle = handle;
+ L2T_SKB_CB(skb)->arp_err_handler = handler;
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e);
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+ struct l2t_entry *e);
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+ const struct net_device *physdev,
+ unsigned int priority);
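+
+/*
+ * Minimal usage sketch (hypothetical caller, not a driver entry point):
+ * record a resolution-failure callback in the skb's control block and hand
+ * the packet to the L2T code, which queues it while the entry is still
+ * resolving.
+ */
+static inline int example_l2t_send(struct net_device *dev,
+                                   struct sk_buff *skb, struct l2t_entry *e,
+                                   void *ctx, arp_err_handler_t on_arp_fail)
+{
+        t4_set_arp_err_handler(skb, ctx, on_arp_fail);
+        return cxgb4_l2t_send(dev, skb, e);
+}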
+
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr);
+struct l2t_data *t4_init_l2t(void);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+
+extern const struct file_operations t4_l2t_fops;
+#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
new file mode 100644
index 000000000000..14adc58e71c3
--- /dev/null
+++ b/drivers/net/cxgb4/sge.c
@@ -0,0 +1,2431 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+/*
+ * Rx buffer size. We use largish buffers if possible but settle for single
+ * pages under memory shortage.
+ */
+#if PAGE_SHIFT >= 16
+# define FL_PG_ORDER 0
+#else
+# define FL_PG_ORDER (16 - PAGE_SHIFT)
+#endif
+
+/* RX_PULL_LEN should be <= RX_COPY_THRES */
+#define RX_COPY_THRES 256
+#define RX_PULL_LEN 128
+
+/*
+ * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
+ * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
+ */
+#define RX_PKT_SKB_LEN 512
+
+/* Ethernet header padding prepended to RX_PKTs */
+#define RX_PKT_PAD 2
+
+/*
+ * Max number of Tx descriptors we clean up at a time. Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks. As long as we
+ * free packets faster than they arrive we eventually catch up and keep the
+ * amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
+ */
+#define MAX_TX_RECLAIM 16
+
+/*
+ * Max number of Rx buffers we replenish at a time. Again keep this modest,
+ * allocating buffers isn't cheap either.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
+ * Period of the Rx queue check timer. This timer is infrequent as it has
+ * something to do only when the system experiences severe memory shortage.
+ */
+#define RX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Period of the Tx queue check timer.
+ */
+#define TX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
+ * attempt to refill it.
+ */
+#define FL_STARVE_THRES 4
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues. We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+enum {
+ /* packet alignment in FL buffers */
+ FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
+ /* egress status entry size */
+ STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
+{
+ return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+static inline bool is_buf_mapped(const struct rx_sw_desc *d)
+{
+ return !(d->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
+
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+out_err:
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+static void unmap_skb(struct device *dev, const struct sk_buff *skb,
+ const dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++)
+ dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
+}
+#endif
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *q)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
+unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)q->stat) {
+ p = (const struct ulptx_sge_pair *)q->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)q->stat) {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)q->stat)
+ p = (const struct ulptx_sge_pair *)q->desc;
+ addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
+ *(const __be64 *)q->desc;
+ dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+ struct device *dev = adap->pdev_dev;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (unmap)
+ unmap_sgl(dev, d->skb, d->sgl, q);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ hw_cidx -= q->cidx;
+ return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ bool unmap)
+{
+ int avail = reclaimable(q);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the Tx lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adap, q, avail, unmap);
+ q->in_use -= avail;
+ }
+}
+
+static inline int get_buf_size(const struct rx_sw_desc *d)
+{
+#if FL_PG_ORDER > 0
+ return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
+ PAGE_SIZE;
+#else
+ return PAGE_SIZE;
+#endif
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ put_page(d->page);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
+{
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= 8) {
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ q->pend_cred &= 7;
+ }
+}
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
+ dma_addr_t mapping)
+{
+ sd->page = pg;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity. If afterwards the queue is
+ * found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
+ gfp_t gfp)
+{
+ struct page *pg;
+ dma_addr_t mapping;
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+
+ gfp |= __GFP_NOWARN; /* failures are expected */
+
+#if FL_PG_ORDER > 0
+ /*
+ * Prefer large buffers
+ */
+ while (n) {
+ pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+ if (unlikely(!pg)) {
+ q->large_alloc_failed++;
+ break; /* fall back to single pages */
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ __free_pages(pg, FL_PG_ORDER);
+ goto out; /* do not try small pages for this error */
+ }
+ mapping |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ n--;
+ }
+#endif
+
+ while (n--) {
+ pg = __netdev_alloc_page(adap->port[0], gfp);
+ if (unlikely(!pg)) {
+ q->alloc_failed++;
+ break;
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ netdev_free_page(adap->port[0], pg);
+ goto out;
+ }
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(q))) {
+ smp_wmb();
+ set_bit(q->cntxt_id, adap->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size)
+{
+ size_t len = nelem * elem_size + stat_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size) {
+ s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+ if (!s) {
+ dma_free_coherent(dev, len, p, *phys);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+ memset(p, 0, len);
+ return p;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
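+
+/*
+ * Worked example (illustrative): an SGL holding 4 addresses takes 2 flits
+ * for the ULPTX header plus len0/addr0, one 3-flit ulptx_sge_pair for the
+ * next two addresses, and 2 flits for the odd final address, matching
+ * sgl_len(4) = (3 * 3) / 2 + (3 & 1) + 2 = 7.
+ */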
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ if (skb_shinfo(skb)->gso_size)
+ flits += 2;
+ return flits;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ return flits_to_desc(calc_tx_flits(skb));
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ *
+ * Generates a gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(si->frags[0].size);
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+/**
+ * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbell for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+{
+ wmb(); /* write descriptors before telling HW */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into Tx descriptors
+ * @skb: the packet
+ * @q: the Tx queue where the packet will be inlined
+ * @pos: starting position in the Tx queue where to inline the packet
+ *
+ * Inline a packet's contents directly into Tx descriptors, starting at
+ * the given position within the Tx DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)q->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, q->desc, skb->len - left);
+ pos = (void *)q->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum: /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
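+
+/*
+ * Illustrative example: for a TCP/IPv4 packet with a 20-byte IP header and
+ * an untagged Ethernet header, this evaluates to
+ * TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(20) | TXPKT_ETHHDR_LEN(0),
+ * i.e. full TCP/IP checksum offload with no extra L2 header bytes.
+ */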
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 wr_mid;
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+
+ /*
+ * The chip's minimum packet length is 10 octets, but play it safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+out_free: dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ }
+
+ wr = (void *)&q->q.desc[q->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->r3 = cpu_to_be64(0);
+ end = (u64 *)wr + flits;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso *lso = (void *)wr;
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso)));
+ lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE | LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
+ lso->len = htonl(skb->len);
+ cpl = (void *)(lso + 1);
+ cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len);
+ q->tso++;
+ q->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(len));
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ q->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ if (is_eth_imm(skb)) {
+ inline_tx_skb(skb, &q->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ int last_desc;
+
+ write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ }
+
+ txq_advance(&q->q, ndesc);
+
+ ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * ctrlq_check_stop - check if a control queue is full and should stop
+ * @q: the queue
+ * @wr: most recent WR written to the queue
+ *
+ * Check if a control queue has become full and should be stopped.
+ * We clean up control queue descriptors very lazily, only when we run out of
+ * room. If the queue is still full after reclaiming any completed descriptors
+ * we suspend it and have the last WR wake it up.
+ */
+static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
+{
+ reclaim_completed_tx_imm(&q->q);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+
+ if (unlikely(!is_imm(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
+ spin_lock(&q->sendq.lock);
+
+ if (unlikely(q->full)) {
+ skb->priority = ndesc; /* save for restart */
+ __skb_queue_tail(&q->sendq, skb);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_CN;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
+ ctrlq_check_stop(q, wr);
+
+ ring_tx_db(q->adap, &q->q, ndesc);
+ spin_unlock(&q->sendq.lock);
+
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @data: the control queue to restart
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ reclaim_completed_tx_imm(&q->q);
+ BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
+
+ while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
+ struct fw_wr_hdr *wr;
+ unsigned int ndesc = skb->priority; /* previously saved */
+
+ /*
+ * Write descriptors and free skbs outside the lock to limit
+ * wait times. q->full is still set so new skbs will be queued.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+ kfree_skb(skb);
+
+ written += ndesc;
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ unsigned long old = q->q.stops;
+
+ ctrlq_check_stop(q, wr);
+ if (q->q.stops != old) { /* suspended anew */
+ spin_lock(&q->sendq.lock);
+ goto ringdb;
+ }
+ }
+ if (written > 16) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+ spin_lock(&q->sendq.lock);
+ }
+ q->full = 0;
+ringdb: if (written)
+ ring_tx_db(q->adap, &q->q, written);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @adap: the adapter
+ * @skb: the packet containing the management message
+ *
+ * Send a management message through control queue 0.
+ */
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN;
+}
+
+/**
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+
+ flits = skb_transport_offset(skb) / 8U; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits + sgl_len(cnt);
+}
+
+/**
+ * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
+ * @q: the queue to stop
+ *
+ * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
+ * inability to map packets. A periodic timer attempts to restart
+ * queues so marked.
+ */
+static void txq_stop_maperr(struct sge_ofld_txq *q)
+{
+ q->mapping_err++;
+ q->q.stops++;
+ set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+}
+
+/**
+ * ofldtxq_stop - stop an offload Tx queue that has become full
+ * @q: the queue to stop
+ * @skb: the packet causing the queue to become full
+ *
+ * Stops an offload Tx queue that has become full and modifies the packet
+ * being written to request a wakeup.
+ */
+static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
+
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+}
+
+/**
+ * service_ofldq - restart a suspended offload queue
+ * @q: the offload queue
+ *
+ * Services an offload Tx queue by moving packets from its packet queue
+ * to the HW Tx ring. The function starts and ends with the queue locked.
+ */
+static void service_ofldq(struct sge_ofld_txq *q)
+{
+ u64 *pos;
+ int credits;
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ unsigned int flits, ndesc;
+
+ while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
+ /*
+ * We drop the lock but leave skb on sendq, thus retaining
+ * exclusive access to the state of the queue.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ reclaim_completed_tx(q->adap, &q->q, false);
+
+ flits = skb->priority; /* previously saved */
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+ BUG_ON(credits < 0);
+ if (unlikely(credits < TXQ_STOP_THRES))
+ ofldtxq_stop(q, skb);
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ if (is_ofld_imm(skb))
+ inline_tx_skb(skb, &q->q, pos);
+ else if (map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
+ txq_stop_maperr(q);
+ spin_lock(&q->sendq.lock);
+ break;
+ } else {
+ int last_desc, hdr_len = skb_transport_offset(skb);
+
+ memcpy(pos, skb->data, hdr_len);
+ write_sgl(skb, &q->q, (void *)pos + hdr_len,
+ pos + flits, hdr_len,
+ (dma_addr_t *)skb->head);
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ skb->dev = q->adap->port[0];
+ skb->destructor = deferred_unmap_destructor;
+#endif
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ }
+
+ txq_advance(&q->q, ndesc);
+ written += ndesc;
+ if (unlikely(written > 32)) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+
+ spin_lock(&q->sendq.lock);
+ __skb_unlink(skb, &q->sendq);
+ if (is_ofld_imm(skb))
+ kfree_skb(skb);
+ }
+ if (likely(written))
+ ring_tx_db(q->adap, &q->q, written);
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
+ spin_lock(&q->sendq.lock);
+ __skb_queue_tail(&q->sendq, skb);
+ if (q->sendq.qlen == 1)
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ofldq - restart a suspended offload queue
+ * @data: the offload queue to restart
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_ofldq(unsigned long data)
+{
+ struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ q->full = 0; /* the queue actually is completely empty now */
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * skb_txq - return the Tx queue an offload packet should use
+ * @skb: the packet
+ *
+ * Returns the Tx queue an offload packet should use as indicated by bits
+ * 1-15 in the packet's queue_mapping.
+ */
+static inline unsigned int skb_txq(const struct sk_buff *skb)
+{
+ return skb->queue_mapping >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Returns whether an offload packet should use an OFLD or a CTRL
+ * Tx queue as indicated by bit 0 in the packet's queue_mapping.
+ */
+static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->queue_mapping & 1;
+}
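+
+/*
+ * Illustrative encoding helper (hypothetical; shown only to document the
+ * queue_mapping convention decoded above): a ULD would put the CTRL/OFLD
+ * selector in bit 0 and the queue index in bits 1-15 before handing the
+ * packet to t4_ofld_send().
+ */
+static inline void example_set_ofld_txq(struct sk_buff *skb,
+                                        unsigned int qidx, bool is_ctrl)
+{
+        skb->queue_mapping = (qidx << 1) | (is_ctrl ? 1 : 0);
+}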
+
+static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ unsigned int idx = skb_txq(skb);
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+}
+
+/**
+ * t4_ofld_send - send an offload packet
+ * @adap: the adapter
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet queue_mapping to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-15 select the queue.
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ofld_send(adap, skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * cxgb4_ofld_send - send an offload packet
+ * @dev: the net device
+ * @skb: the packet
+ *
+ * Sends an offload packet. This is an exported version of @t4_ofld_send,
+ * intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+ return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
+static inline void copy_frags(struct skb_shared_info *ssi,
+ const struct pkt_gl *gl, unsigned int offset)
+{
+ unsigned int n;
+
+ /* usually there's just one frag */
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
+ ssi->frags[0].size = gl->frags[0].size - offset;
+ ssi->nr_frags = gl->nfrags;
+ n = gl->nfrags - 1;
+ if (n)
+ memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[n].page);
+}
+
+/**
+ * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
+ * size, which is expected since buffers are at least PAGE_SIZEd.
+ * In this case packets up to RX_COPY_THRES have only one fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ skb = dev_alloc_skb(gl->tot_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = dev_alloc_skb(skb_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ copy_frags(skb_shinfo(skb), gl, pull_len);
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+ }
+out: return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ * t4_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+void t4_pktgl_free(const struct pkt_gl *gl)
+{
+ int n;
+ const skb_frag_t *p;
+
+ for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+ put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet. Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */
+static noinline int handle_trace_pkt(struct adapter *adap,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ struct cpl_trace_pkt *p;
+
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ return 0;
+ }
+
+ p = (struct cpl_trace_pkt *)skb->data;
+ __skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(0xffff);
+ skb->dev = adap->port[0];
+ netif_receive_skb(skb);
+ return 0;
+}
+
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
+ skb->len = gl->tot_len - RX_PKT_PAD;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+
+ if (unlikely(pkt->vlan_ex)) {
+ struct port_info *pi = netdev_priv(rxq->rspq.netdev);
+ struct vlan_group *grp = pi->vlan_grp;
+
+ rxq->stats.vlan_ex++;
+ if (likely(grp)) {
+ ret = vlan_gro_frags(&rxq->rspq.napi, grp,
+ ntohs(pkt->vlan));
+ goto stats;
+ }
+ }
+ ret = napi_gro_frags(&rxq->rspq.napi);
+stats: if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ bool csum_ok;
+ struct sk_buff *skb;
+ struct port_info *pi;
+ const struct cpl_rx_pkt *pkt;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ return handle_trace_pkt(q->adap, si);
+
+ pkt = (void *)&rsp[1];
+ csum_ok = pkt->csum_calc && !pkt->err_vec;
+ if ((pkt->l2info & htonl(RXF_TCP)) &&
+ (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
+ do_gro(rxq, si, pkt);
+ return 0;
+ }
+
+ skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(si);
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+
+ __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
+ skb->protocol = eth_type_trans(skb, q->netdev);
+ skb_record_rx_queue(skb, q->idx);
+ pi = netdev_priv(skb->dev);
+ rxq->stats.pkts++;
+
+ if (csum_ok && (pi->rx_offload & RX_CSO) &&
+ (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ rxq->stats.rx_cso++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (unlikely(pkt->vlan_ex)) {
+ struct vlan_group *grp = pi->vlan_grp;
+
+ rxq->stats.vlan_ex++;
+ if (likely(grp))
+ vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
+ else
+ dev_kfree_skb_any(skb);
+ } else
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's Rx buffers
+ * @si: the packet gather list
+ * @q: the SGE free list
+ * @frags: number of FL buffers to restore
+ *
+ * Puts back on an FL the Rx buffers associated with @si. The buffers
+ * have already been unmapped and are left unmapped; we mark them so to
+ * prevent further unmapping attempts.
+ *
+ * This function undoes a series of @unmap_rx_buf calls when we find out
+ * that the current packet can't be processed right away after all and we
+ * need to come back to it later. This is a very rare event and there's
+ * no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+ int frags)
+{
+ struct rx_sw_desc *d;
+
+ while (frags--) {
+ if (q->cidx == 0)
+ q->cidx = q->size - 1;
+ else
+ q->cidx--;
+ d = &q->sdesc[q->cidx];
+ d->page = si->frags[frags].page;
+ d->dma_addr |= RX_UNMAPPED_BUF;
+ q->avail++;
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+ const struct sge_rspq *q)
+{
+ return RSPD_GEN(r->type_gen) == q->gen;
+}
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
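+
+/*
+ * Illustrative note: the generation bit is what keeps the protocol above
+ * safe across wrap-around. For, say, a 1024-entry queue, once cidx wraps
+ * from 1023 back to 0 the expected gen flips, so descriptors still carrying
+ * the previous generation are correctly treated as old by is_new_response().
+ */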
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+ int ret, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ skb_frag_t *fp;
+ struct pkt_gl si;
+ const struct rx_sw_desc *rsd;
+ u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+ if (len & RSPD_NEWBUF) {
+ if (likely(q->offset > 0)) {
+ free_rx_bufs(q->adap, &rxq->fl, 1);
+ q->offset = 0;
+ }
+ len &= RSPD_LEN;
+ }
+ si.tot_len = len;
+
+ /* gather packet fragments */
+ for (frags = 0, fp = si.frags; ; frags++, fp++) {
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(rsd);
+ fp->page = rsd->page;
+ fp->page_offset = q->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(q->adap, &rxq->fl);
+ }
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access.
+ */
+ dma_sync_single_for_cpu(q->adap->pdev_dev,
+ get_buf_addr(rsd),
+ fp->size, DMA_FROM_DEVICE);
+
+ si.va = page_address(si.frags[0].page) +
+ si.frags[0].page_offset;
+ prefetch(si.va);
+
+ si.nfrags = frags + 1;
+ ret = q->handler(q, q->cur_desc, &si);
+ if (likely(ret == 0))
+ q->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&si, &rxq->fl, frags);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+ }
+
+ if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+ __refill_fl(q->adap, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * is not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int params;
+ struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(q, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ params = q->next_intr_params;
+ q->next_intr_params = q->intr_params;
+ } else
+ params = QINTR_TIMER_IDX(7);
+
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
+ INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *q = cookie;
+
+ napi_schedule(&q->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+ unsigned int credits;
+ const struct rsp_ctrl *rc;
+ struct sge_rspq *q = &adap->sge.intrq;
+
+ spin_lock(&adap->sge.intrq_lock);
+ for (credits = 0; ; credits++) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+ napi_schedule(&adap->sge.ingr_map[qid]->napi);
+ }
+
+ rspq_next(q);
+ }
+
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
+ INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+ spin_unlock(&adap->sge.intrq_lock);
+ return credits;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events, as they all use the same MSI vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_slow_intr_handler(adap);
+ process_intrq(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt line.
+ */
+static irqreturn_t t4_intr_intx(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+ if (t4_slow_intr_handler(adap) | process_intrq(adap))
+ return IRQ_HANDLED;
+ return IRQ_NONE; /* probably shared interrupt */
+}
+
+/**
+ * t4_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or INTx).
+ */
+irq_handler_t t4_intr_handler(struct adapter *adap)
+{
+ if (adap->flags & USING_MSIX)
+ return t4_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return t4_intr_msi;
+ return t4_intr_intx;
+}
+
+static void sge_rx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, cnt[2];
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ struct sge_eth_rxq *rxq;
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ if (fl_starving(fl)) {
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
+ cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++)
+ if (cnt[i] >= s->starve_thres) {
+ if (s->idma_state[i] || cnt[i] == 0xffffffff)
+ continue;
+ s->idma_state[i] = 1;
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
+ dev_warn(adap->pdev_dev,
+ "SGE idma%u starvation detected for "
+ "queue %lu\n", i, m & 0xffff);
+ } else if (s->idma_state[i])
+ s->idma_state[i] = 0;
+
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+static void sge_tx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, budget;
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+ for (m = s->txq_maperr[i]; m; m &= m - 1) {
+ unsigned long id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_ofld_txq *txq = s->egr_map[id];
+
+ clear_bit(id, s->txq_maperr);
+ tasklet_schedule(&txq->qresume_tsk);
+ }
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *q = &s->ethtxq[i];
+
+ if (q->q.in_use &&
+ time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+ __netif_tx_trylock(q->txq)) {
+ int avail = reclaimable(&q->q);
+
+ if (avail) {
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adap, &q->q, avail, true);
+ q->q.in_use -= avail;
+ budget -= avail;
+ }
+ __netif_tx_unlock(q->txq);
+ }
+
+ if (++i >= s->ethqsets)
+ i = 0;
+ } while (budget && i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct net_device *dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Size needs to be multiple of 16, including status entry. */
+ iq->size = roundup(iq->size, 16);
+
+ iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
+ &iq->phys_addr, NULL, 0);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+
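+ /* Allocate the optional free list ring and describe it in the FW_IQ command. */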
+ if (fl) {
+ fl->size = roundup(fl->size, 8);
+ fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc), &fl->addr,
+ &fl->sdesc, STAT_LEN);
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0PADEN);
+ c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
+ FW_IQ_CMD_FL0FBMAX(3));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
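+ /* Hand the FW_IQ command to the firmware through mailbox 0. */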
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->size--; /* subtract status entry */
+ iq->adap = adap;
+ iq->netdev = dev;
+ iq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ adap->sge.ingr_map[iq->cntxt_id] = iq;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = fl->pend_cred = 0;
+ fl->pidx = fl->cidx = 0;
+ fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
+ adap->sge.egr_map[fl->cntxt_id] = fl;
+ refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
+ }
+ return 0;
+
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ if (iq->desc) {
+ dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
+ iq->desc, iq->phys_addr);
+ iq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+ q->in_use = 0;
+ q->cidx = q->pidx = 0;
+ q->stops = q->restarts = 0;
+ q->stat = (void *)&q->desc[q->size];
+ q->cntxt_id = id;
+ adap->sge.egr_map[id] = q;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
+ FW_EQ_ETH_CMD_FBMAX(3) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->txq = netdevq;
+ txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
+ sizeof(struct tx_desc), 0, &txq->q.phys_addr,
+ NULL, 0);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
+ FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
+ FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
+ FW_EQ_CTRL_CMD_FBMAX(3) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
+ FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+ if (ret) {
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ txq->full = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+ struct net_device *dev, unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_ofld_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
+ c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
+ FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
+ FW_EQ_OFLD_CMD_FBMAX(3) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
+ FW_EQ_OFLD_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ txq->full = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+static void free_txq(struct adapter *adap, struct sge_txq *q)
+{
+ dma_free_coherent(adap->pdev_dev,
+ q->size * sizeof(struct tx_desc) + STAT_LEN,
+ q->desc, q->phys_addr);
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ adap->sge.ingr_map[rq->cntxt_id] = NULL;
+ t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
+ 0xffff);
+ dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
+ rq->desc, rq->phys_addr);
+ netif_napi_del(&rq->napi);
+ rq->netdev = NULL;
+ rq->cntxt_id = rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adap, fl, fl->avail);
+ dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ int i;
+ struct sge_eth_rxq *eq = adap->sge.ethrxq;
+ struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ if (eq->rspq.desc)
+ free_rspq_fl(adap, &eq->rspq, &eq->fl);
+ if (etq->q.desc) {
+ t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
+ free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ kfree(etq->q.sdesc);
+ free_txq(adap, &etq->q);
+ }
+ }
+
+ /* clean up RDMA and iSCSI Rx queues */
+ for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+ for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+
+ /* clean up offload Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+ struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+ if (q->q.desc) {
+ tasklet_kill(&q->qresume_tsk);
+ t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
+ free_tx_desc(adap, &q->q, q->q.in_use, false);
+ kfree(q->q.sdesc);
+ __skb_queue_purge(&q->sendq);
+ free_txq(adap, &q->q);
+ }
+ }
+
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ tasklet_kill(&cq->qresume_tsk);
+ t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
+ __skb_queue_purge(&cq->sendq);
+ free_txq(adap, &cq->q);
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+ if (adap->sge.intrq.desc)
+ free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+ /* clear the reverse egress queue map */
+ memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+ adap->sge.ethtxq_rover = 0;
+ mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+ int i;
+ struct sge *s = &adap->sge;
+
+ if (in_interrupt()) /* actions below require waiting */
+ return;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+ struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+ if (q->q.desc)
+ tasklet_kill(&q->qresume_tsk);
+ }
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+ if (cq->q.desc)
+ tasklet_kill(&cq->qresume_tsk);
+ }
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here; instead, the driver's
+ * top level must request them individually.
+ */
+void t4_sge_init(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ unsigned int fl_align_log = ilog2(FL_ALIGN);
+
+ t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
+ INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
+ INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
+ RXPKTCPLMODE |
+ (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+ t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
+ HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
+#if FL_PG_ORDER > 0
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
+#endif
+ t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
+ THRESHOLD_0(s->counter_val[0]) |
+ THRESHOLD_1(s->counter_val[1]) |
+ THRESHOLD_2(s->counter_val[2]) |
+ THRESHOLD_3(s->counter_val[3]));
+ t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+ s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_state[0] = s->idma_state[1] = 0;
+ spin_lock_init(&s->intrq_lock);
+}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
new file mode 100644
index 000000000000..a814a3afe123
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -0,0 +1,3131 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void) t4_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect addresses
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: address of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ dev_alert(adap->pdev_dev,
+ "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+ ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+{
+ dev_err(adap->pdev_dev,
+ "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+}
+
+/**
+ * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
+ * to respond. @sleep_ok determines whether we may sleep while awaiting
+ * the response. If sleeping is allowed we use progressive backoff;
+ * otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms, delay_idx;
+ const __be64 *p = cmd;
+ u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+
+ if ((size & 15) || size > MBOX_LEN)
+ return -EINVAL;
+
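+ /* If nobody owns the mailbox, re-read ownership a few times before giving up. */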
+ v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+ v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+
+ if (v != MBOX_OWNER_DRV)
+ return v ? -EBUSY : -ETIMEDOUT;
+
+ for (i = 0; i < size; i += 8)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+ t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
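+ /*
+ * Poll for the firmware's response for up to FW_CMD_MAX_TIMEOUT ms,
+ * using the progressive backoff table above when sleeping is allowed.
+ */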
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else
+ mdelay(ms);
+
+ v = t4_read_reg(adap, ctl_reg);
+ if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ if (!(v & MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg, 0);
+ continue;
+ }
+
+ res = t4_read_reg64(adap, data_reg);
+ if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = FW_CMD_RETVAL(EIO);
+ } else if (rpl)
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+
+ if (FW_CMD_RETVAL_GET((int)res))
+ dump_mbox(adap, mbox, data_reg);
+ t4_write_reg(adap, ctl_reg, 0);
+ return -FW_CMD_RETVAL_GET((int)res);
+ }
+ }
+
+ dump_mbox(adap, mbox, data_reg);
+ dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ return -ETIMEDOUT;
+}
+
+/**
+ * t4_mc_read - read from MC through backdoor accesses
+ * @adap: the adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
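+ /* Drive the MC BIST engine to perform a 64-byte backdoor read. */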
+ if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
+ t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
+ t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
+ t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+ BIST_CMD_GAP(1));
+ i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/**
+ * t4_edc_read - read from EDC through backdoor accesses
+ * @adap: the adapter
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
+ t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
+ t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
+ t4_write_reg(adap, EDC_BIST_CMD + idx,
+ BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
+ i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+#define VPD_ENTRY(name, len) \
+ u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+ VPD_ENTRY(pn, 16); /* part number */
+ VPD_ENTRY(ec, EC_LEN); /* EC level */
+ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
+ VPD_ENTRY(na, 12); /* MAC address base */
+ VPD_ENTRY(port_type, 8); /* port types */
+ VPD_ENTRY(gpio, 14); /* GPIO usage */
+ VPD_ENTRY(cclk, 6); /* core clock */
+ VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
+ VPD_ENTRY(rv, 1); /* csum */
+ u32 pad; /* for multiple-of-4 sizing and alignment */
+};
+
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, bool enable)
+{
+ unsigned int v = enable ? 0xc : 0;
+ int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ int ret;
+ struct t4_vpd vpd;
+ u8 *q = (u8 *)&vpd, csum;
+
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+ if (ret < 0)
+ return ret;
+
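+ /* The VPD bytes through the RV checksum byte must sum to zero. */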
+ for (csum = 0; q <= vpd.rv_data; q++)
+ csum += *q;
+
+ if (csum) {
+ dev_err(adapter->pdev_dev,
+ "corrupted VPD EEPROM, actual csum %u\n", csum);
+ return -EINVAL;
+ }
+
+ p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+ memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+ strim(p->id);
+ memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+ strim(p->ec);
+ memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+ strim(p->sn);
+ return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_START_SEC = 8, /* first flash sector for FW */
+ FW_END_SEC = 15, /* last flash sector for FW */
+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, SF_OP) & BUSY)
+ return -EBUSY;
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+ t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, SF_OP) & BUSY)
+ return -EBUSY;
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+ t4_write_reg(adapter, SF_DATA, val);
+ t4_write_reg(adapter, SF_OP, lock |
+ cont | BYTECNT(byte_cnt - 1) | OP_WR);
+ return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+ return ret;
+
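+ /*
+ * Read one 32-bit word per SF access, chaining the accesses and
+ * releasing the flash after the final word.
+ */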
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ goto unlock;
+
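+ /* Feed the payload to the flash up to 4 bytes per SF access. */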
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = flash_wait_op(adapter, 5, 1);
+ if (ret)
+ goto unlock;
+
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *)buf + offset, n)) {
+ dev_err(adapter->pdev_dev,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EIO;
+ }
+ return 0;
+
+unlock:
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter,
+ FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
+ * get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
+ tp_microcode_ver),
+ 1, vers, 0);
+}
+
+/**
+ * t4_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's exact match, a negative error if the version could not be
+ * read or there's a major version mismatch, and a positive value if the
+ * expected major version is found but there's a minor version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adapter)
+{
+ u32 api_vers[2];
+ int ret, major, minor, micro;
+
+ ret = get_fw_version(adapter, &adapter->params.fw_vers);
+ if (!ret)
+ ret = get_tp_version(adapter, &adapter->params.tp_vers);
+ if (!ret)
+ ret = t4_read_flash(adapter,
+ FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
+ 2, api_vers, 1);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
+ minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
+ micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+ memcpy(adapter->params.api_vers, api_vers,
+ sizeof(adapter->params.api_vers));
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ dev_err(adapter->pdev_dev,
+ "card FW has major version %u, driver wants %u\n",
+ major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch. Report it but often it's OK. */
+ return 1;
+}
+
+/**
+ * t4_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+ dev_err(adapter->pdev_dev,
+ "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ break;
+ }
+ start++;
+ }
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * t4_load_fw - download firmware
+ * @adap: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ int ret, addr;
+ unsigned int i;
+ u8 first_page[SF_PAGE_SIZE];
+ const u32 *p = (const u32 *)fw_data;
+ const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+
+ if (!size) {
+ dev_err(adap->pdev_dev, "FW image has no data\n");
+ return -EINVAL;
+ }
+ if (size & 511) {
+ dev_err(adap->pdev_dev,
+ "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+ if (ntohs(hdr->len512) * 512 != size) {
+ dev_err(adap->pdev_dev,
+ "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+ if (size > FW_MAX_SIZE) {
+ dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EFBIG;
+ }
+
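+ /* A valid image's 32-bit big-endian words sum to 0xffffffff. */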
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ dev_err(adap->pdev_dev,
+ "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
+ ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+ if (ret)
+ goto out;
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ addr = FW_IMG_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = t4_write_flash(adap,
+ FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+ if (ret)
+ dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+ ret);
+ return ret;
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_start - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ struct fw_port_cmd c;
+ unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+ lc->link_ok = 0;
+ if (lc->requested_fc & PAUSE_RX)
+ fc |= FW_PORT_CAP_FC_RX;
+ if (lc->requested_fc & PAUSE_TX)
+ fc |= FW_PORT_CAP_FC_TX;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else
+ c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_restart_aneg - restart autonegotiation
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ *
+ * Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+ struct fw_port_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_set_vlan_accel - configure HW VLAN extraction
+ * @adap: the adapter
+ * @ports: bitmap of adapter ports to operate on
+ * @on: enable (1) or disable (0) HW VLAN extraction
+ *
+ * Enables or disables HW extraction of VLAN tags for the ports specified
+ * by @ports. @ports is a bitmap with the ith bit designating the port
+ * associated with the ith adapter channel.
+ */
+void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
+{
+ ports <<= VLANEXTENABLE_SHIFT;
+ t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
+}
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/**
+ * t4_handle_intr_status - table-driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table-driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = t4_read_reg(adapter, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+ status & acts->mask);
+ } else if (acts->msg && printk_ratelimit())
+ dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+ status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ t4_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void sge_intr_handler(struct adapter *adapter)
+{
+ u64 v;
+
+ static struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0 }
+ };
+
+ v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
+ ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+ if (v) {
+ dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ t4_write_reg(adapter, SGE_INT_CAUSE1, v);
+ t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+ }
+
+ if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+ v != 0)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info cim_intr_info[] = {
+ { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { OBQPARERR, "CIM OBQ parity error", -1, 1 },
+ { IBQPARERR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT, "CIM illegal write", -1, 1 },
+ { ILLRDINT, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
+ { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
+ { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void le_intr_handler(struct adapter *adap)
+{
+ static struct intr_info le_intr_info[] = {
+ { LIPMISS, "LE LIP miss", -1, 0 },
+ { LIP0, "LE 0 LIP error", -1, 0 },
+ { PARITYERR, "LE parity error", -1, 1 },
+ { UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+ { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+ { BUBBLE, "MPS Tx underflow", -1, 1 },
+ { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM, "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+ { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
+ RXINT | TXINT | STATINT);
+ t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void mem_intr_handler(struct adapter *adapter, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
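+ /* Select the interrupt cause and ECC status registers for EDC0/EDC1 or MC. */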
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ } else {
+ addr = MC_INT_CAUSE;
+ cnt_addr = MC_ECC_STATUS;
+ }
+
+ v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE)
+ dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
+ name[idx]);
+ if (v & ECC_CE_INT_CAUSE) {
+ u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+
+ t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+ if (printk_ratelimit())
+ dev_warn(adapter->pdev_dev,
+ "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE)
+ dev_alert(adapter->pdev_dev,
+ "%s uncorrectable ECC data error\n", name[idx]);
+
+ t4_write_reg(adapter, addr, v);
+ if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void ma_intr_handler(struct adapter *adap)
+{
+ u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+
+ if (status & MEM_PERR_INT_CAUSE)
+ dev_alert(adap->pdev_dev,
+ "MA parity error, parity status %#x\n",
+ t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ if (status & MEM_WRAP_INT_CAUSE) {
+ v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+ dev_alert(adap->pdev_dev, "MA address wrap-around error by "
+ "client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_GET(v),
+ MEM_WRAP_ADDRESS_GET(v) << 4);
+ }
+ t4_write_reg(adap, MA_INT_CAUSE, status);
+ t4_fatal_err(adap);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void smb_intr_handler(struct adapter *adap)
+{
+ static struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void ncsi_intr_handler(struct adapter *adap)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void xgmac_intr_handler(struct adapter *adap, int port)
+{
+ u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+ v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR)
+ dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
+ port);
+ if (v & RXFIFO_PRTY_ERR)
+ dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
+ port);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+ t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void pl_intr_handler(struct adapter *adap)
+{
+ static struct intr_info pl_intr_info[] = {
+ { FATALPERR, "T4 fatal parity error", -1, 1 },
+ { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+ t4_fatal_err(adap);
+}
+
+#define PF_INTR_MASK (PFSW | PFCIM)
+#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+ EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
+ CPL_SWITCH | SGE | ULP_TX)
+
+/**
+ * t4_slow_intr_handler - control path interrupt handler
+ * @adapter: the adapter
+ *
+ * T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ * It is designated 'slow' because it involves register reads, whereas
+ * data interrupts typically don't require any MMIOs.
+ */
+int t4_slow_intr_handler(struct adapter *adapter)
+{
+ u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+
+ if (!(cause & GLBL_INTR_MASK))
+ return 0;
+ if (cause & CIM)
+ cim_intr_handler(adapter);
+ if (cause & MPS)
+ mps_intr_handler(adapter);
+ if (cause & NCSI)
+ ncsi_intr_handler(adapter);
+ if (cause & PL)
+ pl_intr_handler(adapter);
+ if (cause & SMB)
+ smb_intr_handler(adapter);
+ if (cause & XGMAC0)
+ xgmac_intr_handler(adapter, 0);
+ if (cause & XGMAC1)
+ xgmac_intr_handler(adapter, 1);
+ if (cause & XGMAC_KR0)
+ xgmac_intr_handler(adapter, 2);
+ if (cause & XGMAC_KR1)
+ xgmac_intr_handler(adapter, 3);
+ if (cause & PCIE)
+ pcie_intr_handler(adapter);
+ if (cause & MC)
+ mem_intr_handler(adapter, MEM_MC);
+ if (cause & EDC0)
+ mem_intr_handler(adapter, MEM_EDC0);
+ if (cause & EDC1)
+ mem_intr_handler(adapter, MEM_EDC1);
+ if (cause & LE)
+ le_intr_handler(adapter);
+ if (cause & TP)
+ tp_intr_handler(adapter);
+ if (cause & MA)
+ ma_intr_handler(adapter);
+ if (cause & PM_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & PM_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & ULP_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & SGE)
+ sge_intr_handler(adapter);
+ if (cause & ULP_TX)
+ ulptx_intr_handler(adapter);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
+ (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+ return 1;
+}
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module; here we only enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non-PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+ t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
+ ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
+ ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+ EGRESS_SIZE_ERR);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * t4_intr_clear - clear all interrupts
+ * @adapter: the adapter whose interrupts should be cleared
+ *
+ * Clears all interrupts. The caller must be a PCI function managing
+ * global interrupts.
+ */
+void t4_intr_clear(struct adapter *adapter)
+{
+ static const unsigned int cause_reg[] = {
+ SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
+ MC_INT_CAUSE,
+ MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
+ EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
+ CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
+ MYPF_REG(CIM_PF_HOST_INT_CAUSE),
+ TP_INT_CAUSE,
+ ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
+ PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
+ MPS_RX_PERR_INT_CAUSE,
+ CPL_INTR_CAUSE,
+ MYPF_REG(PL_PF_INT_CAUSE),
+ PL_PL_INT_CAUSE,
+ LE_DB_INT_CAUSE,
+ };
+
+ unsigned int i;
+
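+ /* clear any pending bits in each module's interrupt cause register */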
+ for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
+ t4_write_reg(adapter, cause_reg[i], 0xffffffff);
+
+ t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
+ (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+}
+
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by HW inexact
+ * (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
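+ /* XOR-fold the two 24-bit halves, then fold again down to a 6-bit hash */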
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the response queue lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n, the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = htonl(FW_LEN16(cmd));
+
+ /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
+ while (n > 0) {
+ int nq = min(n, 32);
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ cmd.niqid = htons(nq);
+ cmd.startidx = htons(start);
+
+ start += nq;
+ n -= nq;
+
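+ /* pack up to three queue IDs per 32-bit word, wrapping around @rspq as needed */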
+ while (nq > 0) {
+ unsigned int v;
+
+ v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+
+ *qp++ = htonl(v);
+ nq -= 3;
+ }
+
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * t4_config_glbl_rss - configure the global RSS mode
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @mode: global RSS mode
+ * @flags: mode-specific flags
+ *
+ * Sets the global RSS mode.
+ */
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags)
+{
+ struct fw_rss_glb_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.retval_len16 = htonl(FW_LEN16(c));
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ c.u.basicvirtual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ } else
+ return -EINVAL;
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
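+ /* select the requested row and poll until LKPTBLROWVLD signals valid data */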
+ t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
+ return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
+ 5, 0, val);
+}
+
+/**
+ * t4_read_rss - read the contents of the RSS mapping table
+ * @adapter: the adapter
+ * @map: holds the contents of the RSS mapping table
+ *
+ * Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+ u32 val;
+ int i, ret;
+
+ for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ ret = rd_rss_row(adapter, i, &val);
+ if (ret)
+ return ret;
+ *map++ = LKPTBLQUEUE0_GET(val);
+ *map++ = LKPTBLQUEUE1_GET(val);
+ }
+ return 0;
+}
+
+/**
+ * t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ * @adap: the adapter
+ * @v4: holds the TCP/IP counter values
+ * @v6: holds the TCP/IPv6 counter values
+ *
+ * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+
+#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT(x) val[STAT_IDX(x)]
+#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
+
+ if (v4) {
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+ v4->tcpOutRsts = STAT(OUT_RST);
+ v4->tcpInSegs = STAT64(IN_SEG);
+ v4->tcpOutSegs = STAT64(OUT_SEG);
+ v4->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+ if (v6) {
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+ v6->tcpOutRsts = STAT(OUT_RST);
+ v6->tcpInSegs = STAT64(IN_SEG);
+ v6->tcpOutSegs = STAT64(OUT_SEG);
+ v6->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
+ 12, TP_MIB_MAC_IN_ERR_0);
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
+ 8, TP_MIB_TNL_CNG_DROP_0);
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
+ 4, TP_MIB_TNL_DROP_0);
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
+ 4, TP_MIB_OFD_VLN_DROP_0);
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
+ 4, TP_MIB_TCP_V6IN_ERR_0);
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
+ 2, TP_MIB_OFD_ARP_DROP);
+}
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, TP_MTU_TABLE,
+ MTUINDEX(0xff) | MTUVALUE(i));
+ v = t4_read_reg(adap, TP_MTU_TABLE);
+ mtus[i] = MTUVALUE_GET(v);
+ if (mtu_log)
+ mtu_log[i] = MTUWIDTH_GET(v);
+ }
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = fls(mtu);
+
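+ /* round mtu to the nearest power of 2: keep log2 only if mtu >= 1.5 * 2^(log2 - 1) */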
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
+ MTUWIDTH(log2) | MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
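+ /* additive increment: alpha[w] * (mtu - 40) / avg_pkts[w], floored at CC_MIN_INCR */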
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t4_set_trace_filter - configure one of the tracing filters
+ * @adap: the adapter
+ * @tp: the desired trace filter parameters
+ * @idx: which filter to configure
+ * @enable: whether to enable or disable the filter
+ *
+ * Configures one of the tracing filters available in HW. If @enable is
+ * %0, @tp is not examined and may be %NULL.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+ int idx, int enable)
+{
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg, cfg;
+ u32 multitrc = TRCMULTIFILTER;
+
+ if (!enable) {
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+ goto out;
+ }
+
+ if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
+ tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
+ tp->snap_len > 9600 || (idx && tp->snap_len > 256))
+ return -EINVAL;
+
+ if (tp->snap_len > 256) { /* must be tracer 0 */
+ if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
+ t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
+ t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
+ return -EINVAL; /* other tracers are enabled */
+ multitrc = 0;
+ } else if (idx) {
+ i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
+ if (TFCAPTUREMAX_GET(i) > 256 &&
+ (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
+ return -EINVAL;
+ }
+
+ /* stop the tracer we'll be changing */
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+
+ /* disable tracing globally if running in the wrong single/multi mode */
+ cfg = t4_read_reg(adap, MPS_TRC_CFG);
+ if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
+ t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
+ t4_read_reg(adap, MPS_TRC_CFG); /* flush */
+ msleep(1);
+ if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
+ return -ETIMEDOUT;
+ }
+ /*
+ * At this point tracing is either enabled and in the right mode, or
+ * disabled.
+ */
+
+ idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
+ data_reg = MPS_TRC_FILTER0_MATCH + idx;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ t4_write_reg(adap, data_reg, tp->data[i]);
+ t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+ }
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
+ TFCAPTUREMAX(tp->snap_len) |
+ TFMINPKTSIZE(tp->min_len));
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
+ TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
+ TFPORT(tp->port) | TFEN |
+ (tp->invert ? TFINVERTMATCH : 0));
+
+ cfg &= ~TRCMULTIFILTER;
+ t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
+out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
+ return 0;
+}
+
+/**
+ * t4_get_trace_filter - query one of the tracing filters
+ * @adap: the adapter
+ * @tp: the current trace filter parameters
+ * @idx: which trace filter to query
+ * @enabled: non-zero if the filter is enabled
+ *
+ * Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+ int *enabled)
+{
+ u32 ctla, ctlb;
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg;
+
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+
+ *enabled = !!(ctla & TFEN);
+ tp->snap_len = TFCAPTUREMAX_GET(ctlb);
+ tp->min_len = TFMINPKTSIZE_GET(ctlb);
+ tp->skip_ofst = TFOFFSET_GET(ctla);
+ tp->skip_len = TFLENGTH_GET(ctla);
+ tp->invert = !!(ctla & TFINVERTMATCH);
+ tp->port = TFPORT_GET(ctla);
+
+ ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+ tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+ }
+}
+
+/**
+ * get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
+ */
+static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+{
+ u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+
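+ /*
+ * n encodes the port count: 0 = one port owning all four buffer groups,
+ * 1 = two ports with two groups each, otherwise one group per port
+ */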
+ if (n == 0)
+ return idx == 0 ? 0xf : 0;
+ if (n == 1)
+ return idx < 2 ? (3 << (2 * idx)) : 0;
+ return 1 << idx;
+}
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_get_lb_stats - collect loopback port statistics
+ * @adap: the adapter
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
+ *
+ * Return HW statistics for the given loopback port.
+ */
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
+{
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = t4_read_reg(adap, PORT_REG(idx,
+ MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_wol_magic_enable - enable/disable magic packet WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @addr: MAC address expected in magic packets, %NULL to disable
+ *
+ * Enables/disables magic packet wake-on-LAN for the selected port.
+ */
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+ const u8 *addr)
+{
+ if (addr) {
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | addr[5]);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ (addr[0] << 8) | addr[1]);
+ }
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ addr ? MAGICEN : 0);
+}
+
+/**
+ * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @map: bitmap of which HW pattern filters to set
+ * @mask0: byte mask for bytes 0-63 of a packet
+ * @mask1: byte mask for bytes 64-127 of a packet
+ * @crc: Ethernet CRC for selected bytes
+ * @enable: enable/disable switch
+ *
+ * Sets the pattern filters indicated in @map to mask out the bytes
+ * specified in @mask0/@mask1 in received packets and compare the CRC of
+ * the resulting packet against @crc. If @enable is %true, pattern-based
+ * WoL is enabled; otherwise it is disabled.
+ */
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
+ PATEN, 0);
+ return 0;
+ }
+ if (map > 0xff)
+ return -EINVAL;
+
+#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+
+ t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+ t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+ t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+ for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+ if (!(map & 1))
+ continue;
+
+ /* write byte masks */
+ t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+
+ /* write CRC */
+ t4_write_reg(adap, EPIO_REG(DATA0), crc);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+ }
+#undef EPIO_REG
+
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+ return 0;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+ (var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
+
+/**
+ * t4_mdio_rd - read a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to read
+ * @valp: where to store the value
+ *
+ * Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 *valp)
+{
+ int ret;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ *valp = ntohs(c.u.mdio.rval);
+ return ret;
+}
+
+/**
+ * t4_mdio_wr - write a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val)
+{
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+ c.u.mdio.rval = htons(val);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_mbasyncnot = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+ FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0 && state) {
+ u32 v = ntohl(c.err_to_mbasyncnot);
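+ /* the echoed err_to_mbasyncnot word also reports the current device state */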
+ if (v & FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else if (v & FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+ return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ int i, ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+ FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ for (i = 0; i < nparams; i++, p += 2)
+ *p = htonl(*params++);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+ return ret;
+}
+
+/**
+ * t4_set_params - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ while (nparams--) {
+ *p++ = htonl(*params++);
+ *p++ = htonl(*val++);
+ }
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_cfg_pfvf - configure PF/VF resource limits
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF being configured
+ * @vf: the VF being configured
+ * @txq: the max number of egress queues
+ * @txq_eth_ctrl: the max number of egress Ethernet or control queues
+ * @rxqi: the max number of interrupt-capable ingress queues
+ * @rxq: the max number of interruptless ingress queues
+ * @tc: the PCI traffic class
+ * @vi: the max number of virtual interfaces
+ * @cmask: the channel access rights mask for the PF/VF
+ * @pmask: the port access rights mask for the PF/VF
+ * @nexact: the maximum number of exact MPS filters
+ * @rcaps: read capabilities
+ * @wxcaps: write/execute capabilities
+ *
+ * Configures resource limits and capabilities for a physical or virtual
+ * function.
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+ struct fw_pfvf_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+ c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL, on return it holds the MAC addresses of the VI as assigned
+ * by FW. @mac should be large enough to hold @nmac Ethernet addresses;
+ * they are stored consecutively, so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
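+ /* cases fall through: copy each additional address into its 6-byte slot */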
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ }
+ }
+ if (rss_size)
+ *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+ return ntohs(c.viid_pkd);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
+ FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
+ c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = FW_RXMODE_MTU_NO_CHG;
+ if (promisc < 0)
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ if (all_multi < 0)
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ if (bcast < 0)
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @free: if true any existing filters for this VI id are first removed
+ * @naddr: the number of MAC addresses to allocate filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @idx: where to store the index of each allocated filter
+ * @hash: pointer to hash address filter bitmap
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an exact-match filter for each of the supplied addresses and
+ * sets it to the corresponding address. If @idx is not %NULL, it should
+ * have at least @naddr entries, each of which will be set to the index of
+ * the filter allocated for the corresponding MAC address; if no filter
+ * could be allocated for an address, its index is set to 0xffff.
+ * If @hash is not %NULL, addresses for which no exact filter could be
+ * allocated are hashed and update the hash filter bitmap pointed to by @hash.
+ *
+ * Returns a negative error number or the number of filters allocated.
+ */
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
+{
+ int i, ret;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p;
+
+ if (naddr > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16((naddr + 2) / 2));
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+ }
+
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret)
+ return ret;
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+
+ if (idx)
+ idx[i] = index >= NEXACT_MAC ? 0xffff : index;
+ if (index < NEXACT_MAC)
+ ret++;
+ else if (hash)
+ *hash |= (1 << hash_mac_addr(addr[i]));
+ }
+ return ret;
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address.
+ * Note that, in general, a given filter's value cannot be modified in
+ * place, so the generic way to change an address filter is to free the
+ * one holding the old address value and allocate a new filter for the
+ * new address value. @idx can be -1 if the address is a new addition.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ if (ret >= NEXACT_MAC)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4_set_addr_hash - program the MAC inexact-match hash filter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
+ FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+ FW_CMD_LEN16(1));
+ c.u.hash.hashvec = cpu_to_be64(vec);
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+ FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_identify_port - identify a VI's port by blinking its LED
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks)
+{
+ struct fw_vi_enable_cmd c;
+
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+ c.blinkdur = htons(nblinks);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+ FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
+ FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+ FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+ FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+ FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ofld_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+ FW_EQ_OFLD_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ if (opcode == FW_PORT_CMD) { /* link/module state change message */
+ int speed = 0, fc = 0;
+ const struct fw_port_cmd *p = (void *)rpl;
+ int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+ int port = adap->chan_map[chan];
+ struct port_info *pi = adap2pinfo(adap, port);
+ struct link_config *lc = &pi->link_cfg;
+ u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4_os_link_changed(adap, port, link_ok);
+ }
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, port);
+ }
+ }
+ return 0;
+}
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+ struct pci_params *p)
+{
+ u16 val;
+ u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+ &val);
+ p->speed = val & PCI_EXP_LNKSTA_CLS;
+ p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+ }
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+static int __devinit wait_dev_ready(struct adapter *adap)
+{
+ if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+ return 0;
+ msleep(500);
+ return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ * @reset: if true perform a HW reset
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+ int ret;
+
+ ret = wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+ adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ return 0;
+}
+
+int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
+ struct fw_port_cmd c;
+
+ memset(&c, 0, sizeof(c));
+
+ for_each_port(adap, i) {
+ unsigned int rss_size;
+ struct port_info *p = adap2pinfo(adap, i);
+
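+ /* advance to the next channel present in the port vector */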
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ |
+ FW_PORT_CMD_PORTID(j));
+ c.action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ p->viid = ret;
+ p->tx_chan = j;
+ p->lport = j;
+ p->rss_size = rss_size;
+ memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+
+ ret = ntohl(c.u.info.lstatus_to_modtype);
+ p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
+ FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
+ p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
+ p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
+
+ init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+ j++;
+ }
+ return 0;
+}
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
new file mode 100644
index 000000000000..025623285c93
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include <linux/types.h>
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NEXACT_MAC = 336, /* # of exact MAC address filters */
+ L2T_SIZE = 4096, /* # of L2T entries */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NWOL_PAT = 8, /* # of WoL patterns */
+ WOL_PAT_LEN = 128, /* length of WoL patterns */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
+};
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ };
+};
+
+#define RSPD_NEWBUF 0x80000000U
+#define RSPD_LEN 0x7fffffffU
+
+#define RSPD_GEN(x) ((x) >> 7)
+#define RSPD_TYPE(x) (((x) >> 4) & 3)
+
+#define QINTR_CNT_EN 0x1
+#define QINTR_TIMER_IDX(x) ((x) << 1)
+#endif /* __T4_HW_H */
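
The RSPD_* and QINTR_* definitions above describe the last flit of an SGE response descriptor: bits 4-5 of type_gen carry the entry type, bit 7 the generation bit, and pldbuflen_qid carries a new-buffer flag plus a 31-bit length. A minimal decode sketch, assuming only the header above and the usual kernel byte-order/printk helpers (the driver's real consumer is its RX path, which is not part of this header):

/* Sketch only: decode the trailing rsp_ctrl of an ingress queue entry using
 * the macros above.  ntohl()/pr_debug() are the usual kernel helpers. */
static void decode_rsp_ctrl(const struct rsp_ctrl *rc)
{
	unsigned int type = RSPD_TYPE(rc->type_gen);	/* RSP_TYPE_FLBUF/CPL/INTR */
	unsigned int gen = RSPD_GEN(rc->type_gen);	/* generation bit */
	u32 pld = ntohl(rc->pldbuflen_qid);

	if (type == RSP_TYPE_FLBUF && (pld & RSPD_NEWBUF))
		pr_debug("new FL buffer, payload len %u, gen %u\n",
			 pld & RSPD_LEN, gen);
	else
		pr_debug("entry type %u, gen %u\n", type, gen);
}
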
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
new file mode 100644
index 000000000000..fdb117443144
--- /dev/null
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -0,0 +1,664 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_MSG_H
+#define __T4_MSG_H
+
+#include <linux/types.h>
+
+enum {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_TID_RELEASE = 0x1A,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PASS_OPEN_RPL = 0x24,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_ABORT_REQ_RSS = 0x2B,
+ CPL_ABORT_RPL_RSS = 0x2D,
+
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RX_DATA = 0x39,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_RX_PKT = 0x3B,
+ CPL_RX_DDP_COMPLETE = 0x3F,
+
+ CPL_ACT_ESTABLISH = 0x40,
+ CPL_PASS_ESTABLISH = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_PASS_ACCEPT_REQ = 0x44,
+
+ CPL_RDMA_READ_REQ = 0x60,
+
+ CPL_PASS_OPEN_REQ6 = 0x81,
+ CPL_ACT_OPEN_REQ6 = 0x83,
+
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_RDMA_WRITE = 0xA4,
+ CPL_SGE_EGR_UPDATE = 0xA5,
+
+ CPL_TRACE_PKT = 0xB0,
+
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW4_PLD = 0xC1,
+ CPL_FW4_ACK = 0xC3,
+
+ CPL_FW6_MSG = 0xE0,
+ CPL_FW6_PLD = 0xE1,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+
+ NUM_CPL_CMDS
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_BAD_LENGTH = 15,
+ CPL_ERR_BAD_ROUTE = 18,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST_SYNRECV = 21,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_IWARP_FLM = 50,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_FCOE = 6,
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCP = 0,
+ TX_CSUM_UDP = 1,
+ TX_CSUM_CRC16 = 4,
+ TX_CSUM_CRC32 = 5,
+ TX_CSUM_CRC32C = 6,
+ TX_CSUM_FCOE = 7,
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+ TX_CSUM_UDPIP6 = 11,
+ TX_CSUM_IP = 12,
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ u8 opcode;
+};
+
+#define CPL_OPCODE(x) ((x) << 24)
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* partitioning of TID fields that also carry a queue id */
+#define GET_TID_TID(x) ((x) & 0x3fff)
+#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
+#define TID_QID(x) ((x) << 14)
+
+struct rss_header {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 channel:2;
+ u8 filter_hit:1;
+ u8 filter_tid:1;
+ u8 hash_type:2;
+ u8 ipv6:1;
+ u8 send2fw:1;
+#else
+ u8 send2fw:1;
+ u8 ipv6:1;
+ u8 hash_type:2;
+ u8 filter_tid:1;
+ u8 filter_hit:1;
+ u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+#define TX_CHAN(x) ((x) << 2)
+#define DELACK(x) ((x) << 5)
+#define ULP_MODE(x) ((x) << 8)
+#define RCV_BUFSIZ(x) ((x) << 12)
+#define DSCP(x) ((x) << 22)
+#define SMAC_SEL(x) ((u64)(x) << 28)
+#define L2T_IDX(x) ((u64)(x) << 36)
+#define NAGLE(x) ((u64)(x) << 49)
+#define WND_SCALE(x) ((u64)(x) << 50)
+#define KEEP_ALIVE(x) ((u64)(x) << 54)
+#define MSS_IDX(x) ((u64)(x) << 60)
+ __be64 opt1;
+#define SYN_RSS_ENABLE (1 << 0)
+#define SYN_RSS_QUEUE(x) ((x) << 2)
+#define CONN_POLICY_ASK (1 << 22)
+};
+
+struct cpl_pass_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+#define RSS_QUEUE(x) ((x) << 0)
+#define RSS_QUEUE_VALID (1 << 10)
+#define RX_COALESCE_VALID(x) ((x) << 11)
+#define RX_COALESCE(x) ((x) << 12)
+#define TX_QUEUE(x) ((x) << 23)
+#define RX_CHANNEL(x) ((x) << 26)
+#define WND_SCALE_EN(x) ((x) << 28)
+#define TSTAMPS_EN(x) ((x) << 29)
+#define SACK_EN(x) ((x) << 30)
+ __be64 opt0;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+struct cpl_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+struct cpl_act_open_rpl {
+ union opcode_tid ot;
+ __be32 atid_status;
+#define GET_AOPEN_STATUS(x) ((x) & 0xff)
+#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
+};
+
+struct cpl_pass_establish {
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_stid;
+#define GET_POPEN_TID(x) ((x) & 0xffffff)
+#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
+ __be16 mac_idx;
+ __be16 tcp_opt;
+#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_act_establish {
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_atid;
+ __be16 mac_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+#define QUEUENO(x) ((x) << 0)
+#define REPLY_CHAN(x) ((x) << 14)
+#define NO_REPLY(x) ((x) << 15)
+ __be16 cookie;
+};
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+#define TCB_WORD(x) ((x) << 0)
+#define TCB_COOKIE(x) ((x) << 5)
+ __be64 mask;
+ __be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+ union opcode_tid ot;
+ __be16 rsvd;
+ u8 cookie;
+ u8 status;
+ __be64 oldval;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+#define LISTSVR_IPV6 (1 << 14)
+ __be16 rsvd;
+};
+
+struct cpl_close_listsvr_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req_rss {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+#define TXPKT_VF(x) ((x) << 0)
+#define TXPKT_PF(x) ((x) << 8)
+#define TXPKT_VF_VLD (1 << 11)
+#define TXPKT_OVLAN_IDX(x) ((x) << 12)
+#define TXPKT_INTF(x) ((x) << 16)
+#define TXPKT_INS_OVLAN (1 << 21)
+#define TXPKT_OPCODE(x) ((x) << 24)
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+#define TXPKT_CSUM_END(x) ((x) << 12)
+#define TXPKT_CSUM_START(x) ((x) << 20)
+#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
+#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
+#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
+#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
+#define TXPKT_VLAN(x) ((u64)(x) << 44)
+#define TXPKT_VLAN_VLD (1ULL << 60)
+#define TXPKT_IPCSUM_DIS (1ULL << 62)
+#define TXPKT_L4CSUM_DIS (1ULL << 63)
+};
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+#define cpl_tx_pkt_xt cpl_tx_pkt
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ __be32 lso_ctrl;
+#define LSO_TCPHDR_LEN(x) ((x) << 0)
+#define LSO_IPHDR_LEN(x) ((x) << 4)
+#define LSO_ETHHDR_LEN(x) ((x) << 16)
+#define LSO_IPV6(x) ((x) << 20)
+#define LSO_LAST_SLICE (1 << 22)
+#define LSO_FIRST_SLICE (1 << 23)
+#define LSO_OPCODE(x) ((x) << 24)
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_iscsi_hdr {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
+#define ISCSI_DDP (1 << 15)
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 dack_mode:2;
+ u8 psh:1;
+ u8 heartbeat:1;
+ u8 ddp_off:1;
+ u8 :3;
+#else
+ u8 :3;
+ u8 ddp_off:1;
+ u8 heartbeat:1;
+ u8 psh:1;
+ u8 dack_mode:2;
+#endif
+ u8 status;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+#define RX_CREDITS(x) ((x) << 0)
+#define RX_FORCE_ACK(x) ((x) << 28)
+};
+
+struct cpl_rx_pkt {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 iff:4;
+ u8 csum_calc:1;
+ u8 ipmi_pkt:1;
+ u8 vlan_ex:1;
+ u8 ip_frag:1;
+#else
+ u8 ip_frag:1;
+ u8 vlan_ex:1;
+ u8 ipmi_pkt:1;
+ u8 csum_calc:1;
+ u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+#define RXF_UDP (1 << 22)
+#define RXF_TCP (1 << 23)
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+struct cpl_trace_pkt {
+ u8 opcode;
+ u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 runt:4;
+ u8 filter_hit:4;
+ u8 :6;
+ u8 err:1;
+ u8 trunc:1;
+#else
+ u8 filter_hit:4;
+ u8 runt:4;
+ u8 trunc:1;
+ u8 err:1;
+ u8 :6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 params;
+#define L2T_W_INFO(x) ((x) << 2)
+#define L2T_W_PORT(x) ((x) << 8)
+#define L2T_W_NOREPLY(x) ((x) << 15)
+ __be16 l2t_idx;
+ __be16 vlan;
+ u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rdma_terminate {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+};
+
+struct cpl_sge_egr_update {
+ __be32 opcode_qid;
+#define EGR_QID(x) ((x) & 0x1FFFF)
+ __be16 cidx;
+ __be16 pidx;
+};
+
+struct cpl_fw4_pld {
+ u8 opcode;
+ u8 rsvd0[3];
+ u8 type;
+ u8 rsvd1;
+ __be16 len;
+ __be64 data;
+ __be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+ u8 opcode;
+ u8 rsvd[5];
+ __be16 len;
+ __be64 data[4];
+};
+
+struct cpl_fw4_msg {
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw4_ack {
+ union opcode_tid ot;
+ u8 credits;
+ u8 rsvd0[2];
+ u8 seq_vld;
+ __be32 snd_nxt;
+ __be32 snd_una;
+ __be64 rsvd1;
+};
+
+struct cpl_fw6_msg {
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+enum {
+ ULP_TX_MEM_READ = 2,
+ ULP_TX_MEM_WRITE = 3,
+ ULP_TX_PKT = 4
+};
+
+enum {
+ ULP_TX_SC_NOOP = 0x80,
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+#define ULPTX_CMD(x) ((x) << 24)
+#define ULPTX_NSGE(x) ((x) << 0)
+ __be32 len0;
+ __be64 addr0;
+ struct ulptx_sge_pair sge[0];
+};
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd;
+#define ULP_MEMIO_ORDER(x) ((x) << 23)
+ __be32 len16; /* command length */
+ __be32 dlen; /* data length in 32-byte units */
+#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
+ __be32 lock_addr;
+#define ULP_MEMIO_ADDR(x) ((x) << 0)
+#define ULP_MEMIO_LOCK(x) ((x) << 31)
+};
+
+#endif /* __T4_MSG_H */
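
The opcode_tid convention above packs an 8-bit CPL opcode and a 24-bit TID into one big-endian word: MK_OPCODE_TID() composes it, and GET_TID() recovers the TID after ntohl(). A hedged sketch of filling a CPL_TID_RELEASE that way, assuming the header above and the usual kernel string/byte-order helpers:

/* Sketch: compose the opcode/TID word for a CPL_TID_RELEASE.  The WR_HDR
 * words are left zeroed here; callers normally fill them as well. */
static void mk_tid_release(struct cpl_tid_release *req, unsigned int tid)
{
	memset(req, 0, sizeof(*req));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
	/* A receiver recovers the TID with GET_TID(req), i.e. ntohl() & 0xFFFFFF. */
}
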
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
new file mode 100644
index 000000000000..5ed56483cbc2
--- /dev/null
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -0,0 +1,878 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_REGS_H
+#define __T4_REGS_H
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
+#define SGE_PF_KDOORBELL 0x0
+#define QID_MASK 0xffff8000U
+#define QID_SHIFT 15
+#define QID(x) ((x) << QID_SHIFT)
+#define DBPRIO 0x00004000U
+#define PIDX_MASK 0x00003fffU
+#define PIDX_SHIFT 0
+#define PIDX(x) ((x) << PIDX_SHIFT)
+
+#define SGE_PF_GTS 0x4
+#define INGRESSQID_MASK 0xffff0000U
+#define INGRESSQID_SHIFT 16
+#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
+#define TIMERREG_MASK 0x0000e000U
+#define TIMERREG_SHIFT 13
+#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
+#define SEINTARM_MASK 0x00001000U
+#define SEINTARM_SHIFT 12
+#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
+#define CIDXINC_MASK 0x00000fffU
+#define CIDXINC_SHIFT 0
+#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
+
+#define SGE_CONTROL 0x1008
+#define DCASYSTYPE 0x00080000U
+#define RXPKTCPLMODE 0x00040000U
+#define EGRSTATUSPAGESIZE 0x00020000U
+#define PKTSHIFT_MASK 0x00001c00U
+#define PKTSHIFT_SHIFT 10
+#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_MASK 0x00000380U
+#define INGPCIEBOUNDARY_SHIFT 7
+#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
+#define INGPADBOUNDARY_MASK 0x00000070U
+#define INGPADBOUNDARY_SHIFT 4
+#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
+#define EGRPCIEBOUNDARY_MASK 0x0000000eU
+#define EGRPCIEBOUNDARY_SHIFT 1
+#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
+#define GLOBALENABLE 0x00000001U
+
+#define SGE_HOST_PAGE_SIZE 0x100c
+#define HOSTPAGESIZEPF0_MASK 0x0000000fU
+#define HOSTPAGESIZEPF0_SHIFT 0
+#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+#define QUEUESPERPAGEPF0_MASK 0x0000000fU
+#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+
+#define SGE_INT_CAUSE1 0x1024
+#define SGE_INT_CAUSE2 0x1030
+#define SGE_INT_CAUSE3 0x103c
+#define ERR_FLM_DBP 0x80000000U
+#define ERR_FLM_IDMA1 0x40000000U
+#define ERR_FLM_IDMA0 0x20000000U
+#define ERR_FLM_HINT 0x10000000U
+#define ERR_PCIE_ERROR3 0x08000000U
+#define ERR_PCIE_ERROR2 0x04000000U
+#define ERR_PCIE_ERROR1 0x02000000U
+#define ERR_PCIE_ERROR0 0x01000000U
+#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
+#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
+#define ERR_INVALID_CIDX_INC 0x00200000U
+#define ERR_ITP_TIME_PAUSED 0x00100000U
+#define ERR_CPL_OPCODE_0 0x00080000U
+#define ERR_DROPPED_DB 0x00040000U
+#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
+#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
+#define ERR_BAD_DB_PIDX3 0x00008000U
+#define ERR_BAD_DB_PIDX2 0x00004000U
+#define ERR_BAD_DB_PIDX1 0x00002000U
+#define ERR_BAD_DB_PIDX0 0x00001000U
+#define ERR_ING_PCIE_CHAN 0x00000800U
+#define ERR_ING_CTXT_PRIO 0x00000400U
+#define ERR_EGR_CTXT_PRIO 0x00000200U
+#define DBFIFO_HP_INT 0x00000100U
+#define DBFIFO_LP_INT 0x00000080U
+#define REG_ADDRESS_ERR 0x00000040U
+#define INGRESS_SIZE_ERR 0x00000020U
+#define EGRESS_SIZE_ERR 0x00000010U
+#define ERR_INV_CTXT3 0x00000008U
+#define ERR_INV_CTXT2 0x00000004U
+#define ERR_INV_CTXT1 0x00000002U
+#define ERR_INV_CTXT0 0x00000001U
+
+#define SGE_INT_ENABLE3 0x1040
+#define SGE_FL_BUFFER_SIZE0 0x1044
+#define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_INGRESS_RX_THRESHOLD 0x10a0
+#define THRESHOLD_0_MASK 0x3f000000U
+#define THRESHOLD_0_SHIFT 24
+#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
+#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
+#define THRESHOLD_1_MASK 0x003f0000U
+#define THRESHOLD_1_SHIFT 16
+#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
+#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
+#define THRESHOLD_2_MASK 0x00003f00U
+#define THRESHOLD_2_SHIFT 8
+#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
+#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
+#define THRESHOLD_3_MASK 0x0000003fU
+#define THRESHOLD_3_SHIFT 0
+#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
+#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
+
+#define SGE_TIMER_VALUE_0_AND_1 0x10b8
+#define TIMERVALUE0_MASK 0xffff0000U
+#define TIMERVALUE0_SHIFT 16
+#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
+#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
+#define TIMERVALUE1_MASK 0x0000ffffU
+#define TIMERVALUE1_SHIFT 0
+#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
+#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
+
+#define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define SGE_DEBUG_INDEX 0x10cc
+#define SGE_DEBUG_DATA_HIGH 0x10d0
+#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+
+#define PCIE_PF_CLI 0x44
+#define PCIE_INT_CAUSE 0x3004
+#define UNXSPLCPLERR 0x20000000U
+#define PCIEPINT 0x10000000U
+#define PCIESINT 0x08000000U
+#define RPLPERR 0x04000000U
+#define RXWRPERR 0x02000000U
+#define RXCPLPERR 0x01000000U
+#define PIOTAGPERR 0x00800000U
+#define MATAGPERR 0x00400000U
+#define INTXCLRPERR 0x00200000U
+#define FIDPERR 0x00100000U
+#define CFGSNPPERR 0x00080000U
+#define HRSPPERR 0x00040000U
+#define HREQPERR 0x00020000U
+#define HCNTPERR 0x00010000U
+#define DRSPPERR 0x00008000U
+#define DREQPERR 0x00004000U
+#define DCNTPERR 0x00002000U
+#define CRSPPERR 0x00001000U
+#define CREQPERR 0x00000800U
+#define CCNTPERR 0x00000400U
+#define TARTAGPERR 0x00000200U
+#define PIOREQPERR 0x00000100U
+#define PIOCPLPERR 0x00000080U
+#define MSIXDIPERR 0x00000040U
+#define MSIXDATAPERR 0x00000020U
+#define MSIXADDRHPERR 0x00000010U
+#define MSIXADDRLPERR 0x00000008U
+#define MSIDATAPERR 0x00000004U
+#define MSIADDRHPERR 0x00000002U
+#define MSIADDRLPERR 0x00000001U
+
+#define PCIE_NONFAT_ERR 0x3010
+#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define PCIEOFST_MASK 0xfffffc00U
+#define BIR_MASK 0x00000300U
+#define BIR_SHIFT 8
+#define BIR(x) ((x) << BIR_SHIFT)
+#define WINDOW_MASK 0x000000ffU
+#define WINDOW_SHIFT 0
+#define WINDOW(x) ((x) << WINDOW_SHIFT)
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
+#define RNPP 0x80000000U
+#define RPCP 0x20000000U
+#define RCIP 0x08000000U
+#define RCCP 0x04000000U
+#define RFTP 0x00800000U
+#define PTRP 0x00100000U
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
+#define TPCP 0x40000000U
+#define TNPP 0x20000000U
+#define TFTP 0x10000000U
+#define TCAP 0x08000000U
+#define TCIP 0x04000000U
+#define RCAP 0x02000000U
+#define PLUP 0x00800000U
+#define PLDN 0x00400000U
+#define OTDD 0x00200000U
+#define GTRP 0x00100000U
+#define RDPE 0x00040000U
+#define TDCE 0x00020000U
+#define TDUE 0x00010000U
+
+#define MC_INT_CAUSE 0x7518
+#define ECC_UE_INT_CAUSE 0x00000004U
+#define ECC_CE_INT_CAUSE 0x00000002U
+#define PERR_INT_CAUSE 0x00000001U
+
+#define MC_ECC_STATUS 0x751c
+#define ECC_CECNT_MASK 0xffff0000U
+#define ECC_CECNT_SHIFT 16
+#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
+#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
+#define ECC_UECNT_MASK 0x0000ffffU
+#define ECC_UECNT_SHIFT 0
+#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
+#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
+
+#define MC_BIST_CMD 0x7600
+#define START_BIST 0x80000000U
+#define BIST_CMD_GAP_MASK 0x0000ff00U
+#define BIST_CMD_GAP_SHIFT 8
+#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
+#define BIST_OPCODE_MASK 0x00000003U
+#define BIST_OPCODE_SHIFT 0
+#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
+
+#define MC_BIST_CMD_ADDR 0x7604
+#define MC_BIST_CMD_LEN 0x7608
+#define MC_BIST_DATA_PATTERN 0x760c
+#define BIST_DATA_TYPE_MASK 0x0000000fU
+#define BIST_DATA_TYPE_SHIFT 0
+#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
+
+#define MC_BIST_STATUS_RDATA 0x7688
+
+#define MA_EXT_MEMORY_BAR 0x77c8
+#define EXT_MEM_SIZE_MASK 0x00000fffU
+#define EXT_MEM_SIZE_SHIFT 0
+#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
+
+#define MA_TARGET_MEM_ENABLE 0x77d8
+#define EXT_MEM_ENABLE 0x00000004U
+#define EDRAM1_ENABLE 0x00000002U
+#define EDRAM0_ENABLE 0x00000001U
+
+#define MA_INT_CAUSE 0x77e0
+#define MEM_PERR_INT_CAUSE 0x00000002U
+#define MEM_WRAP_INT_CAUSE 0x00000001U
+
+#define MA_INT_WRAP_STATUS 0x77e4
+#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
+#define MEM_WRAP_ADDRESS_SHIFT 4
+#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
+#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
+#define MEM_WRAP_CLIENT_NUM_SHIFT 0
+#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
+
+#define MA_PARITY_ERROR_STATUS 0x77f4
+
+#define EDC_0_BASE_ADDR 0x7900
+
+#define EDC_BIST_CMD 0x7904
+#define EDC_BIST_CMD_ADDR 0x7908
+#define EDC_BIST_CMD_LEN 0x790c
+#define EDC_BIST_DATA_PATTERN 0x7910
+#define EDC_BIST_STATUS_RDATA 0x7928
+#define EDC_INT_CAUSE 0x7978
+#define ECC_UE_PAR 0x00000020U
+#define ECC_CE_PAR 0x00000010U
+#define PERR_PAR_CAUSE 0x00000008U
+
+#define EDC_ECC_STATUS 0x797c
+
+#define EDC_1_BASE_ADDR 0x7980
+
+#define CIM_PF_MAILBOX_DATA 0x240
+#define CIM_PF_MAILBOX_CTRL 0x280
+#define MBMSGVALID 0x00000008U
+#define MBINTREQ 0x00000004U
+#define MBOWNER_MASK 0x00000003U
+#define MBOWNER_SHIFT 0
+#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
+#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+
+#define CIM_PF_HOST_INT_CAUSE 0x28c
+#define MBMSGRDYINT 0x00080000U
+
+#define CIM_HOST_INT_CAUSE 0x7b2c
+#define TIEQOUTPARERRINT 0x00100000U
+#define TIEQINPARERRINT 0x00080000U
+#define MBHOSTPARERR 0x00040000U
+#define MBUPPARERR 0x00020000U
+#define IBQPARERR 0x0001f800U
+#define IBQTP0PARERR 0x00010000U
+#define IBQTP1PARERR 0x00008000U
+#define IBQULPPARERR 0x00004000U
+#define IBQSGELOPARERR 0x00002000U
+#define IBQSGEHIPARERR 0x00001000U
+#define IBQNCSIPARERR 0x00000800U
+#define OBQPARERR 0x000007e0U
+#define OBQULP0PARERR 0x00000400U
+#define OBQULP1PARERR 0x00000200U
+#define OBQULP2PARERR 0x00000100U
+#define OBQULP3PARERR 0x00000080U
+#define OBQSGEPARERR 0x00000040U
+#define OBQNCSIPARERR 0x00000020U
+#define PREFDROPINT 0x00000002U
+#define UPACCNONZERO 0x00000001U
+
+#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
+#define EEPROMWRINT 0x40000000U
+#define TIMEOUTMAINT 0x20000000U
+#define TIMEOUTINT 0x10000000U
+#define RSPOVRLOOKUPINT 0x08000000U
+#define REQOVRLOOKUPINT 0x04000000U
+#define BLKWRPLINT 0x02000000U
+#define BLKRDPLINT 0x01000000U
+#define SGLWRPLINT 0x00800000U
+#define SGLRDPLINT 0x00400000U
+#define BLKWRCTLINT 0x00200000U
+#define BLKRDCTLINT 0x00100000U
+#define SGLWRCTLINT 0x00080000U
+#define SGLRDCTLINT 0x00040000U
+#define BLKWREEPROMINT 0x00020000U
+#define BLKRDEEPROMINT 0x00010000U
+#define SGLWREEPROMINT 0x00008000U
+#define SGLRDEEPROMINT 0x00004000U
+#define BLKWRFLASHINT 0x00002000U
+#define BLKRDFLASHINT 0x00001000U
+#define SGLWRFLASHINT 0x00000800U
+#define SGLRDFLASHINT 0x00000400U
+#define BLKWRBOOTINT 0x00000200U
+#define BLKRDBOOTINT 0x00000100U
+#define SGLWRBOOTINT 0x00000080U
+#define SGLRDBOOTINT 0x00000040U
+#define ILLWRBEINT 0x00000020U
+#define ILLRDBEINT 0x00000010U
+#define ILLRDINT 0x00000008U
+#define ILLWRINT 0x00000004U
+#define ILLTRANSINT 0x00000002U
+#define RSVDSPACEINT 0x00000001U
+
+#define TP_OUT_CONFIG 0x7d04
+#define VLANEXTENABLE_MASK 0x0000f000U
+#define VLANEXTENABLE_SHIFT 12
+
+#define TP_PARA_REG2 0x7d68
+#define MAXRXDATA_MASK 0xffff0000U
+#define MAXRXDATA_SHIFT 16
+#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
+
+#define TP_TIMER_RESOLUTION 0x7d90
+#define TIMERRESOLUTION_MASK 0x00ff0000U
+#define TIMERRESOLUTION_SHIFT 16
+#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+
+#define TP_SHIFT_CNT 0x7dc0
+
+#define TP_CCTRL_TABLE 0x7ddc
+#define TP_MTU_TABLE 0x7de4
+#define MTUINDEX_MASK 0xff000000U
+#define MTUINDEX_SHIFT 24
+#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
+#define MTUWIDTH_MASK 0x000f0000U
+#define MTUWIDTH_SHIFT 16
+#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
+#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
+#define MTUVALUE_MASK 0x00003fffU
+#define MTUVALUE_SHIFT 0
+#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
+#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
+
+#define TP_RSS_LKP_TABLE 0x7dec
+#define LKPTBLROWVLD 0x80000000U
+#define LKPTBLQUEUE1_MASK 0x000ffc00U
+#define LKPTBLQUEUE1_SHIFT 10
+#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
+#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
+#define LKPTBLQUEUE0_MASK 0x000003ffU
+#define LKPTBLQUEUE0_SHIFT 0
+#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
+#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
+
+#define TP_PIO_ADDR 0x7e40
+#define TP_PIO_DATA 0x7e44
+#define TP_MIB_INDEX 0x7e50
+#define TP_MIB_DATA 0x7e54
+#define TP_INT_CAUSE 0x7e74
+#define FLMTXFLSTEMPTY 0x40000000U
+
+#define TP_INGRESS_CONFIG 0x141
+#define VNIC 0x00000800U
+#define CSUM_HAS_PSEUDO_HDR 0x00000400U
+#define RM_OVLAN 0x00000200U
+#define LOOKUPEVERYPKT 0x00000100U
+
+#define TP_MIB_MAC_IN_ERR_0 0x0
+#define TP_MIB_TCP_OUT_RST 0xc
+#define TP_MIB_TCP_IN_SEG_HI 0x10
+#define TP_MIB_TCP_IN_SEG_LO 0x11
+#define TP_MIB_TCP_OUT_SEG_HI 0x12
+#define TP_MIB_TCP_OUT_SEG_LO 0x13
+#define TP_MIB_TCP_RXT_SEG_HI 0x14
+#define TP_MIB_TCP_RXT_SEG_LO 0x15
+#define TP_MIB_TNL_CNG_DROP_0 0x18
+#define TP_MIB_TCP_V6IN_ERR_0 0x28
+#define TP_MIB_TCP_V6OUT_RST 0x2c
+#define TP_MIB_OFD_ARP_DROP 0x36
+#define TP_MIB_TNL_DROP_0 0x44
+#define TP_MIB_OFD_VLN_DROP_0 0x58
+
+#define ULP_TX_INT_CAUSE 0x8dcc
+#define PBL_BOUND_ERR_CH3 0x80000000U
+#define PBL_BOUND_ERR_CH2 0x40000000U
+#define PBL_BOUND_ERR_CH1 0x20000000U
+#define PBL_BOUND_ERR_CH0 0x10000000U
+
+#define PM_RX_INT_CAUSE 0x8fdc
+#define ZERO_E_CMD_ERROR 0x00400000U
+#define PMRX_FRAMING_ERROR 0x003ffff0U
+#define OCSPI_PAR_ERROR 0x00000008U
+#define DB_OPTIONS_PAR_ERROR 0x00000004U
+#define IESPI_PAR_ERROR 0x00000002U
+#define E_PCMD_PAR_ERROR 0x00000001U
+
+#define PM_TX_INT_CAUSE 0x8ffc
+#define PCMD_LEN_OVFL0 0x80000000U
+#define PCMD_LEN_OVFL1 0x40000000U
+#define PCMD_LEN_OVFL2 0x20000000U
+#define ZERO_C_CMD_ERROR 0x10000000U
+#define PMTX_FRAMING_ERROR 0x0ffffff0U
+#define OESPI_PAR_ERROR 0x00000008U
+#define ICSPI_PAR_ERROR 0x00000002U
+#define C_PCMD_PAR_ERROR 0x00000001U
+
+#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MPS_CMN_CTL 0x9000
+#define NUMPORTS_MASK 0x00000003U
+#define NUMPORTS_SHIFT 0
+#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
+
+#define MPS_INT_CAUSE 0x9008
+#define STATINT 0x00000020U
+#define TXINT 0x00000010U
+#define RXINT 0x00000008U
+#define TRCINT 0x00000004U
+#define CLSINT 0x00000002U
+#define PLINT 0x00000001U
+
+#define MPS_TX_INT_CAUSE 0x9408
+#define PORTERR 0x00010000U
+#define FRMERR 0x00008000U
+#define SECNTERR 0x00004000U
+#define BUBBLE 0x00002000U
+#define TXDESCFIFO 0x00001e00U
+#define TXDATAFIFO 0x000001e0U
+#define NCSIFIFO 0x00000010U
+#define TPFIFO 0x0000000fU
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define MPS_TRC_CFG 0x9800
+#define TRCFIFOEMPTY 0x00000010U
+#define TRCIGNOREDROPINPUT 0x00000008U
+#define TRCKEEPDUPLICATES 0x00000004U
+#define TRCEN 0x00000002U
+#define TRCMULTIFILTER 0x00000001U
+
+#define MPS_TRC_RSS_CONTROL 0x9808
+#define RSSCONTROL_MASK 0x00ff0000U
+#define RSSCONTROL_SHIFT 16
+#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
+#define QUEUENUMBER_MASK 0x0000ffffU
+#define QUEUENUMBER_SHIFT 0
+#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
+#define TFINVERTMATCH 0x01000000U
+#define TFPKTTOOLARGE 0x00800000U
+#define TFEN 0x00400000U
+#define TFPORT_MASK 0x003c0000U
+#define TFPORT_SHIFT 18
+#define TFPORT(x) ((x) << TFPORT_SHIFT)
+#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
+#define TFDROP 0x00020000U
+#define TFSOPEOPERR 0x00010000U
+#define TFLENGTH_MASK 0x00001f00U
+#define TFLENGTH_SHIFT 8
+#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
+#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
+#define TFOFFSET_MASK 0x0000001fU
+#define TFOFFSET_SHIFT 0
+#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
+#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
+#define TFMINPKTSIZE_MASK 0x01ff0000U
+#define TFMINPKTSIZE_SHIFT 16
+#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
+#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
+#define TFCAPTUREMAX_MASK 0x00003fffU
+#define TFCAPTUREMAX_SHIFT 0
+#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
+#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
+
+#define MPS_TRC_INT_CAUSE 0x985c
+#define MISCPERR 0x00000100U
+#define PKTFIFO 0x000000f0U
+#define FILTMEM 0x0000000fU
+
+#define MPS_TRC_FILTER0_MATCH 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
+#define MPS_TRC_FILTER1_MATCH 0x9d00
+#define MPS_CLS_INT_CAUSE 0xd028
+#define PLERRENB 0x00000008U
+#define HASHSRAM 0x00000004U
+#define MATCHTCAM 0x00000002U
+#define MATCHSRAM 0x00000001U
+
+#define MPS_RX_PERR_INT_CAUSE 0x11074
+
+#define CPL_INTR_CAUSE 0x19054
+#define CIM_OP_MAP_PERR 0x00000020U
+#define CIM_OVFL_ERROR 0x00000010U
+#define TP_FRAMING_ERROR 0x00000008U
+#define SGE_FRAMING_ERROR 0x00000004U
+#define CIM_FRAMING_ERROR 0x00000002U
+#define ZERO_SWITCH_ERROR 0x00000001U
+
+#define SMB_INT_CAUSE 0x19090
+#define MSTTXFIFOPARINT 0x00200000U
+#define MSTRXFIFOPARINT 0x00100000U
+#define SLVFIFOPARINT 0x00080000U
+
+#define ULP_RX_INT_CAUSE 0x19158
+#define ULP_RX_ISCSI_TAGMASK 0x19164
+#define ULP_RX_ISCSI_PSZ 0x19168
+#define HPZ3_MASK 0x0f000000U
+#define HPZ3_SHIFT 24
+#define HPZ3(x) ((x) << HPZ3_SHIFT)
+#define HPZ2_MASK 0x000f0000U
+#define HPZ2_SHIFT 16
+#define HPZ2(x) ((x) << HPZ2_SHIFT)
+#define HPZ1_MASK 0x00000f00U
+#define HPZ1_SHIFT 8
+#define HPZ1(x) ((x) << HPZ1_SHIFT)
+#define HPZ0_MASK 0x0000000fU
+#define HPZ0_SHIFT 0
+#define HPZ0(x) ((x) << HPZ0_SHIFT)
+
+#define ULP_RX_TDDP_PSZ 0x19178
+
+#define SF_DATA 0x193f8
+#define SF_OP 0x193fc
+#define BUSY 0x80000000U
+#define SF_LOCK 0x00000010U
+#define SF_CONT 0x00000008U
+#define BYTECNT_MASK 0x00000006U
+#define BYTECNT_SHIFT 1
+#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
+#define OP_WR 0x00000001U
+
+#define PL_PF_INT_CAUSE 0x3c0
+#define PFSW 0x00000008U
+#define PFSGE 0x00000004U
+#define PFCIM 0x00000002U
+#define PFMPS 0x00000001U
+
+#define PL_PF_INT_ENABLE 0x3c4
+#define PL_PF_CTL 0x3c8
+#define SWINT 0x00000001U
+
+#define PL_WHOAMI 0x19400
+#define SOURCEPF_MASK 0x00000700U
+#define SOURCEPF_SHIFT 8
+#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
+#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
+#define ISVF 0x00000080U
+#define VFID_MASK 0x0000007fU
+#define VFID_SHIFT 0
+#define VFID(x) ((x) << VFID_SHIFT)
+#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
+
+#define PL_INT_CAUSE 0x1940c
+#define ULP_TX 0x08000000U
+#define SGE 0x04000000U
+#define HMA 0x02000000U
+#define CPL_SWITCH 0x01000000U
+#define ULP_RX 0x00800000U
+#define PM_RX 0x00400000U
+#define PM_TX 0x00200000U
+#define MA 0x00100000U
+#define TP 0x00080000U
+#define LE 0x00040000U
+#define EDC1 0x00020000U
+#define EDC0 0x00010000U
+#define MC 0x00008000U
+#define PCIE 0x00004000U
+#define PMU 0x00002000U
+#define XGMAC_KR1 0x00001000U
+#define XGMAC_KR0 0x00000800U
+#define XGMAC1 0x00000400U
+#define XGMAC0 0x00000200U
+#define SMB 0x00000100U
+#define SF 0x00000080U
+#define PL 0x00000040U
+#define NCSI 0x00000020U
+#define MPS 0x00000010U
+#define MI 0x00000008U
+#define DBG 0x00000004U
+#define I2CM 0x00000002U
+#define CIM 0x00000001U
+
+#define PL_INT_MAP0 0x19414
+#define PL_RST 0x19428
+#define PIORST 0x00000002U
+#define PIORSTMODE 0x00000001U
+
+#define PL_PL_INT_CAUSE 0x19430
+#define FATALPERR 0x00000010U
+#define PERRVFID 0x00000001U
+
+#define PL_REV 0x1943c
+
+#define LE_DB_CONFIG 0x19c04
+#define HASHEN 0x00100000U
+
+#define LE_DB_SERVER_INDEX 0x19c18
+#define LE_DB_ACT_CNT_IPV4 0x19c20
+#define LE_DB_ACT_CNT_IPV6 0x19c24
+
+#define LE_DB_INT_CAUSE 0x19c3c
+#define REQQPARERR 0x00010000U
+#define UNKNOWNCMD 0x00008000U
+#define PARITYERR 0x00000040U
+#define LIPMISS 0x00000020U
+#define LIP0 0x00000010U
+
+#define LE_DB_TID_HASHBASE 0x19df8
+
+#define NCSI_INT_CAUSE 0x1a0d8
+#define CIM_DM_PRTY_ERR 0x00000100U
+#define MPS_DM_PRTY_ERR 0x00000080U
+#define TXFIFO_PRTY_ERR 0x00000002U
+#define RXFIFO_PRTY_ERR 0x00000001U
+
+#define XGMAC_PORT_CFG2 0x1018
+#define PATEN 0x00040000U
+#define MAGICEN 0x00020000U
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0 0x10c0
+#define XGMAC_PORT_EPIO_DATA1 0x10c4
+#define XGMAC_PORT_EPIO_DATA2 0x10c8
+#define XGMAC_PORT_EPIO_DATA3 0x10cc
+#define XGMAC_PORT_EPIO_OP 0x10d0
+#define EPIOWR 0x00000100U
+#define ADDRESS_MASK 0x000000ffU
+#define ADDRESS_SHIFT 0
+#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+
+#define XGMAC_PORT_INT_CAUSE 0x10dc
+#endif /* __T4_REGS_H */
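
Nearly every register in t4_regs.h follows the same NAME_MASK / NAME_SHIFT / NAME(x) / NAME_GET(x) convention. A short sketch of both directions, reading PL_WHOAMI and ringing the SGE doorbell; t4_read_reg()/t4_write_reg() stand in for the driver's MMIO accessors and are assumptions here, not definitions from this patch:

/* Sketch of the field-macro idiom: NAME_GET(v) extracts a field after a read,
 * NAME(x) shifts a value into place for a write.  t4_read_reg()/t4_write_reg()
 * are assumed MMIO accessors. */
static void ring_db_example(struct adapter *adap, unsigned int qid, unsigned int n)
{
	u32 whoami = t4_read_reg(adap, PL_WHOAMI);

	pr_debug("PF %u ringing egress queue %u with %u new descriptors\n",
		 SOURCEPF_GET(whoami), qid, n);
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(qid) | PIDX(n));
}
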
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 000000000000..3393d05a388a
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1580 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+#define FW_T4VF_SGE_BASE_ADDR 0x0000
+#define FW_T4VF_MPS_BASE_ADDR 0x0100
+#define FW_T4VF_PL_BASE_ADDR 0x0200
+#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
+#define FW_T4VF_CIM_BASE_ADDR 0x0300
+
+enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_FLOWC_WR = 0x0a,
+ FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_CMD_WR = 0x10,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_RI_RES_WR = 0x0c,
+ FW_RI_INIT_WR = 0x0d,
+ FW_RI_RDMA_WRITE_WR = 0x14,
+ FW_RI_SEND_WR = 0x15,
+ FW_RI_RDMA_READ_WR = 0x16,
+ FW_RI_RECV_WR = 0x17,
+ FW_RI_BIND_MW_WR = 0x18,
+ FW_RI_FR_NSMR_WR = 0x19,
+ FW_RI_INV_LSTAG_WR = 0x1a,
+ FW_LASTC2E_WR = 0x40
+};
+
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_ATOMIC(x) ((x) << 23)
+#define FW_WR_FLUSH(x) ((x) << 22)
+#define FW_WR_COMPL(x) ((x) << 21)
+#define FW_WR_IMMDLEN(x) ((x) << 0)
+
+#define FW_WR_EQUIQ (1U << 31)
+#define FW_WR_EQUEQ (1U << 30)
+#define FW_WR_FLOWID(x) ((x) << 8)
+#define FW_WR_LEN16(x) ((x) << 0)
+
+struct fw_ulptx_wr {
+ __be32 op_to_compl;
+ __be32 flowid_len16;
+ u64 cookie;
+};
+
+struct fw_tp_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+};
+
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+enum fw_flowc_mnem {
+ FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
+ FW_FLOWC_MNEM_CH,
+ FW_FLOWC_MNEM_PORT,
+ FW_FLOWC_MNEM_IQID,
+ FW_FLOWC_MNEM_SNDNXT,
+ FW_FLOWC_MNEM_RCVNXT,
+ FW_FLOWC_MNEM_SNDBUF,
+ FW_FLOWC_MNEM_MSS,
+};
+
+struct fw_flowc_mnemval {
+ u8 mnemonic;
+ u8 r4[3];
+ __be32 val;
+};
+
+struct fw_flowc_wr {
+ __be32 op_to_nparams;
+#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
+ __be32 flowid_len16;
+ struct fw_flowc_mnemval mnemval[0];
+};
+
+struct fw_ofld_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 tunnel_to_proxy;
+#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
+#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
+#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
+#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
+#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
+#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
+#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
+};
+
+struct fw_cmd_wr {
+ __be32 op_dma;
+#define FW_CMD_WR_DMA (1U << 17)
+ __be32 len16_pkd;
+ __be64 cookie_daddr;
+};
+
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ u8 ethmacdst[6];
+ u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+#define FW_CMD_MAX_TIMEOUT 3000
+
+enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_MNGT_CMD = 0x11,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
+ FW_EQ_OFLD_CMD = 0x21,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_ACL_MAC_CMD = 0x18,
+ FW_ACL_VLAN_CMD = 0x19,
+ FW_VI_STATS_CMD = 0x1a,
+ FW_PORT_CMD = 0x1b,
+ FW_PORT_STATS_CMD = 0x1c,
+ FW_PORT_LB_STATS_CMD = 0x1d,
+ FW_PORT_TRACE_CMD = 0x1e,
+ FW_PORT_TRACE_MMAP_CMD = 0x1f,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_LASTC2E_CMD = 0x40,
+ FW_ERROR_CMD = 0x80,
+ FW_DEBUG_CMD = 0x81,
+};
+
+enum fw_cmd_cap {
+ FW_CMD_CAP_PF = 0x01,
+ FW_CMD_CAP_DMAQ = 0x02,
+ FW_CMD_CAP_PORT = 0x04,
+ FW_CMD_CAP_PORTPROMISC = 0x08,
+ FW_CMD_CAP_PORTSTATS = 0x10,
+ FW_CMD_CAP_VF = 0x80,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define FW_CMD_OP(x) ((x) << 24)
+#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
+#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_READ (1U << 22)
+#define FW_CMD_WRITE (1U << 21)
+#define FW_CMD_EXEC (1U << 20)
+#define FW_CMD_RAMASK(x) ((x) << 20)
+#define FW_CMD_RETVAL(x) ((x) << 8)
+#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
+#define FW_CMD_LEN16(x) ((x) << 0)
+
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
+ FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
+ FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
+ FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
+ FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+ FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
+ FW_LDST_ADDRSPC_TP_MIB = 0x0012,
+ FW_LDST_ADDRSPC_MDIO = 0x0018,
+ FW_LDST_ADDRSPC_MPS = 0x0020,
+ FW_LDST_ADDRSPC_FUNC = 0x0028
+};
+
+enum fw_ldst_mps_fid {
+ FW_LDST_MPS_ATRB,
+ FW_LDST_MPS_RPLC
+};
+
+enum fw_ldst_func_access_ctl {
+ FW_LDST_FUNC_ACC_CTL_VIID,
+ FW_LDST_FUNC_ACC_CTL_FID
+};
+
+enum fw_ldst_func_mod_index {
+ FW_LDST_FUNC_MPS
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_pkd;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ u8 access_ctl;
+ u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ } u;
+};
+
+#define FW_LDST_CMD_MSG(x) ((x) << 31)
+#define FW_LDST_CMD_PADDR(x) ((x) << 8)
+#define FW_LDST_CMD_MMD(x) ((x) << 0)
+#define FW_LDST_CMD_FID(x) ((x) << 15)
+#define FW_LDST_CMD_CTL(x) ((x) << 0)
+#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 r3;
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_mbasyncnot;
+#define FW_HELLO_CMD_ERR (1U << 31)
+#define FW_HELLO_CMD_INIT (1U << 30)
+#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
+#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
+#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
+#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
+ __be32 fwrev;
+};
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_hm {
+ FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
+ FW_CAPS_CONFIG_HM_PL = 0x00000002,
+ FW_CAPS_CONFIG_HM_SGE = 0x00000004,
+ FW_CAPS_CONFIG_HM_CIM = 0x00000008,
+ FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
+ FW_CAPS_CONFIG_HM_TP = 0x00000020,
+ FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
+ FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
+ FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
+ FW_CAPS_CONFIG_HM_MC = 0x00000200,
+ FW_CAPS_CONFIG_HM_LE = 0x00000400,
+ FW_CAPS_CONFIG_HM_MPS = 0x00000800,
+ FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
+ FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
+ FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
+ FW_CAPS_CONFIG_HM_MI = 0x00008000,
+ FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
+ FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
+ FW_CAPS_CONFIG_HM_SMB = 0x00040000,
+ FW_CAPS_CONFIG_HM_MA = 0x00080000,
+ FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
+ FW_CAPS_CONFIG_HM_PMU = 0x00200000,
+ FW_CAPS_CONFIG_HM_UART = 0x00400000,
+ FW_CAPS_CONFIG_HM_SF = 0x00800000,
+};
+
+enum fw_caps_config_nbm {
+ FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
+ FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
+};
+
+enum fw_caps_config_link {
+ FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
+ FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
+ FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
+};
+
+enum fw_caps_config_switch {
+ FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
+ FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC = 0x00000001,
+ FW_CAPS_CONFIG_NIC_VM = 0x00000002,
+};
+
+enum fw_caps_config_ofld {
+ FW_CAPS_CONFIG_OFLD = 0x00000001,
+};
+
+enum fw_caps_config_rdma {
+ FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
+ FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+};
+
+enum fw_caps_config_iscsi {
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
+ FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
+ FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+};
+
+enum fw_caps_config_fcoe {
+ FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
+ FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 ofldcaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 r5;
+ __be64 r6;
+};
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+ FW_PARAMS_MNEM_LAST
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
+ FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
+ FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
+ FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
+ FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
+ FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
+ FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
+ FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
+ FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
+ FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
+ FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
+ FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
+ FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
+ FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
+ FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
+ FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
+ FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
+ FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
+ FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+ FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+};
+
+#define FW_PARAMS_MNEM(x) ((x) << 24)
+#define FW_PARAMS_PARAM_X(x) ((x) << 16)
+#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
+#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
+#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
+#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
+
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
+#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
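
A parameter slot is addressed by packing the mnemonic and the X/Y/Z selectors into the 32-bit mnem word with the shifts defined above. A hedged sketch of packing a single device-parameter query; only the names defined in this header are real, the helper itself is made up for illustration, and the read convention noted in the comment is an assumption:

/* Sketch: select the device port-vector parameter in slot 0 of a
 * fw_params_cmd.  On a read the firmware is assumed to return the
 * result in val.
 */
static void pack_portvec_query(struct fw_params_cmd *cmd)
{
	u32 mnem = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		   FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	cmd->param[0].mnem = cpu_to_be32(mnem);
	cmd->param[0].val = cpu_to_be32(0);
}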
+
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 cmask_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define FW_PFVF_CMD_PFN(x) ((x) << 8)
+#define FW_PFVF_CMD_VFN(x) ((x) << 0)
+
+#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
+#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
+
+#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
+#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
+
+#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
+#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
+
+#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_TC(x) ((x) << 24)
+#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_NVI(x) ((x) << 16)
+#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
+#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
+
+#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
+#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
+#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
+#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
+
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+ FW_IQ_TYPE_NO_FL_INT_CAP
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define FW_IQ_CMD_PFN(x) ((x) << 8)
+#define FW_IQ_CMD_VFN(x) ((x) << 0)
+
+#define FW_IQ_CMD_ALLOC (1U << 31)
+#define FW_IQ_CMD_FREE (1U << 30)
+#define FW_IQ_CMD_MODIFY (1U << 29)
+#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
+#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
+
+#define FW_IQ_CMD_TYPE(x) ((x) << 29)
+#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
+#define FW_IQ_CMD_VIID(x) ((x) << 16)
+#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
+#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
+#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
+#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQDROPRSS (1U << 15)
+#define FW_IQ_CMD_IQGTSMODE (1U << 14)
+#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
+#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
+#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
+#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
+#define FW_IQ_CMD_IQO (1U << 3)
+#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
+#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQNS(x) ((x) << 31)
+#define FW_IQ_CMD_IQRO(x) ((x) << 30)
+#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
+#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
+#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
+#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL0PADEN (1U << 2)
+#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
+
+#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL1PADEN (1U << 2)
+#define FW_IQ_CMD_FL1PACKEN (1U << 1)
+#define FW_IQ_CMD_FL1CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 viid_pkd;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
+#define FW_EQ_ETH_CMD_FREE (1U << 30)
+#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
+#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
+#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
+
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
+#define FW_EQ_CTRL_CMD_FREE (1U << 30)
+#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
+#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
+#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
+#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
+#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
+#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
+#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
+#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
+
+struct fw_eq_ofld_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
+#define FW_EQ_OFLD_CMD_FREE (1U << 30)
+#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
+#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
+#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
+
+/*
+ * Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
+#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
+#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
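
Given the layout in the comment above, decoding a VIID is three masked shifts. A minimal sketch (helper name illustrative):

/* Sketch: split a VIID into PF number, valid bit and VI number. */
static void viid_unpack(unsigned int viid, unsigned int *pfn,
			unsigned int *vivld, unsigned int *vin)
{
	*pfn = FW_VIID_PFN_GET(viid);     /* bits [10:8] */
	*vivld = FW_VIID_VIVLD_GET(viid); /* bit  [7]    */
	*vin = FW_VIID_VIN_GET(viid);     /* bits [6:0]  */
}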
+
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 viid_pkd;
+ u8 mac[6];
+ u8 portid_pkd;
+ u8 nmac;
+ u8 nmac0[6];
+ __be16 rsssize_pkd;
+ u8 nmac1[6];
+ __be16 r7;
+ u8 nmac2[6];
+ __be16 r8;
+ u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define FW_VI_CMD_PFN(x) ((x) << 8)
+#define FW_VI_CMD_VFN(x) ((x) << 0)
+#define FW_VI_CMD_ALLOC (1U << 31)
+#define FW_VI_CMD_FREE (1U << 30)
+#define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_PORTID(x) ((x) << 4)
+#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY,
+ FW_VI_MAC_MPS_TCAM_ONLY,
+ FW_VI_MAC_SMT_ONLY,
+ FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+enum fw_vi_mac_result {
+ FW_VI_MAC_R_SUCCESS,
+ FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
+ FW_VI_MAC_R_SMAC_FAIL,
+ FW_VI_MAC_R_F_ACL_CHECK
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ } u;
+};
+
+#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
+#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
+#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
+#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
+#define FW_VI_MAC_CMD_VALID (1U << 15)
+#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
+#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
+#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
+#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
+#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
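
The exact-match array and the special index values combine as follows. A minimal sketch of filling one slot, assuming FW_VI_MAC_ADD_MAC asks the firmware to choose a free index (the helper name is illustrative):

/* Sketch: program one exact-match MAC entry, letting the firmware
 * pick the index.
 */
static void fill_exact_mac(struct fw_vi_mac_cmd *cmd, int slot,
			   const u8 *addr)
{
	struct fw_vi_mac_exact *p = &cmd->u.exact[slot];

	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
}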
+
+#define FW_RXMODE_MTU_NO_CHG 65535
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_broadcasten;
+ __be32 r4_lo;
+};
+
+#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
+#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
+#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
+
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
+#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
+#define FW_VI_ENABLE_CMD_LED (1U << 29)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+enum fw_vi_stats_vf_index {
+ FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
+ FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_ERR_FRAMES_IX
+};
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
+#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
+#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_acl_mac_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ u8 nmac;
+ u8 r3[7];
+ __be16 r4;
+ u8 macaddr0[6];
+ __be16 r5;
+ u8 macaddr1[6];
+ __be16 r6;
+ u8 macaddr2[6];
+ __be16 r7;
+ u8 macaddr3[6];
+};
+
+#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
+
+struct fw_acl_vlan_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ u8 nvlan;
+ u8 dropnovlan_fm;
+ u8 r3_lo[6];
+ __be16 vlanid[16];
+};
+
+#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
+#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
+#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
+
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDI_0 = 0x0200,
+ FW_PORT_CAP_MDI_1 = 0x0400,
+ FW_PORT_CAP_BEAN = 0x0800,
+ FW_PORT_CAP_PMA_LPBK = 0x1000,
+ FW_PORT_CAP_PCS_LPBK = 0x2000,
+ FW_PORT_CAP_PHYXS_LPBK = 0x4000,
+ FW_PORT_CAP_FAR_END_LPBK = 0x8000,
+};
+
+enum fw_port_mdi {
+ FW_PORT_MDI_UNCHANGED,
+ FW_PORT_MDI_AUTO,
+ FW_PORT_MDI_F_STRAIGHT,
+ FW_PORT_MDI_F_CROSSOVER
+};
+
+#define FW_PORT_MDI(x) ((x) << 9)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_L2_CFG = 0x0002,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
+ FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
+ FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
+ FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
+ FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
+ FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
+ FW_PORT_ACTION_L1_LPBK = 0x0021,
+ FW_PORT_ACTION_L1_PMA_LPBK = 0x0022,
+ FW_PORT_ACTION_L1_PCS_LPBK = 0x0023,
+ FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
+ FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
+ FW_PORT_ACTION_PHY_RESET = 0x0040,
+ FW_PORT_ACTION_PMA_RESET = 0x0041,
+ FW_PORT_ACTION_PCS_RESET = 0x0042,
+ FW_PORT_ACTION_PHYXS_RESET = 0x0043,
+ FW_PORT_ACTION_DTEXS_REEST = 0x0044,
+ FW_PORT_ACTION_AN_RESET = 0x0045
+};
+
+enum fw_port_l2cfg_ctlbf {
+ FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
+ FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
+ FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
+ FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
+ FW_PORT_L2_CTLBF_IVLAN = 0x10,
+ FW_PORT_L2_CTLBF_TXIPG = 0x20
+};
+
+enum fw_port_dcb_cfg {
+ FW_PORT_DCB_CFG_PG = 0x01,
+ FW_PORT_DCB_CFG_PFC = 0x02,
+ FW_PORT_DCB_CFG_APPL = 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+ FW_PORT_DCB_CFG_SUCCESS = 0x0,
+ FW_PORT_DCB_CFG_ERROR = 0x1
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __be16 ctlbf_to_ivlan0;
+ __be16 ivlantype;
+ __be32 txipg_pkd;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ } info;
+ struct fw_port_ppp {
+ __be32 pppen_to_ncsich;
+ __be32 r11;
+ } ppp;
+ struct fw_port_dcb {
+ __be16 cfg;
+ u8 up_map;
+ u8 sf_cfgrc;
+ __be16 prot_ix;
+ u8 pe7_to_pe0;
+ u8 numTCPFCs;
+ __be32 pgid0_to_pgid7;
+ __be32 numTCs_oui;
+ u8 pgpc[8];
+ } dcb;
+ } u;
+};
+
+#define FW_PORT_CMD_READ (1U << 22)
+
+#define FW_PORT_CMD_PORTID(x) ((x) << 0)
+#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#define FW_PORT_CMD_ACTION(x) ((x) << 16)
+
+#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
+#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
+#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
+#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
+#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
+#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
+
+#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
+
+#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
+#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
+#define FW_PORT_CMD_TXPAUSE (1U << 23)
+#define FW_PORT_CMD_RXPAUSE (1U << 22)
+#define FW_PORT_CMD_MDIOCAP (1U << 21)
+#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
+#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
+#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
+#define FW_PORT_CMD_PTYPE_MASK 0x1f
+#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
+#define FW_PORT_CMD_MODTYPE_MASK 0x1f
+#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
+
+#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
+#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
+#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
+
+#define FW_PORT_CMD_CH0(x) ((x) << 20)
+#define FW_PORT_CMD_CH1(x) ((x) << 16)
+#define FW_PORT_CMD_CH2(x) ((x) << 12)
+#define FW_PORT_CMD_CH3(x) ((x) << 8)
+#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
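
The lstatus_to_modtype word of a port-info reply is decoded with the accessors above; given the 6-bit LSPEED accessor, the speed field appears to carry FW_PORT_CAP_SPEED_* bits, which is an assumption in this hedged sketch:

/* Sketch: report whether a GET_PORT_INFO reply shows a 10G link up.
 * Assumes the LSPEED field carries FW_PORT_CAP_SPEED_* bits.
 */
static bool port_link_is_10g(const struct fw_port_cmd *cmd)
{
	u32 stat = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

	return (stat & FW_PORT_CMD_LSTATUS) &&
	       (FW_PORT_CMD_LSPEED_GET(stat) & FW_PORT_CAP_SPEED_10G);
}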
+
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER,
+ FW_PORT_TYPE_KX4,
+ FW_PORT_TYPE_BT_SGMII,
+ FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_BT_XAUI,
+ FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_CX4,
+ FW_PORT_TYPE_TWINAX,
+
+ FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
+};
+
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA,
+ FW_PORT_MOD_TYPE_LR,
+ FW_PORT_MOD_TYPE_SR,
+ FW_PORT_MOD_TYPE_ER,
+
+ FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ u8 nstats_bg_bm;
+ u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
+#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
+
+/* port loopback stats */
+#define FW_NUM_LB_STATS 16
+enum fw_port_lb_stats_index {
+ FW_STAT_LB_PORT_BYTES_IX,
+ FW_STAT_LB_PORT_FRAMES_IX,
+ FW_STAT_LB_PORT_BCAST_IX,
+ FW_STAT_LB_PORT_MCAST_IX,
+ FW_STAT_LB_PORT_UCAST_IX,
+ FW_STAT_LB_PORT_ERROR_IX,
+ FW_STAT_LB_PORT_64B_IX,
+ FW_STAT_LB_PORT_65B_127B_IX,
+ FW_STAT_LB_PORT_128B_255B_IX,
+ FW_STAT_LB_PORT_256B_511B_IX,
+ FW_STAT_LB_PORT_512B_1023B_IX,
+ FW_STAT_LB_PORT_1024B_1518B_IX,
+ FW_STAT_LB_PORT_1519B_MAX_IX,
+ FW_STAT_LB_PORT_DROP_FRAMES_IX
+};
+
+struct fw_port_lb_stats_cmd {
+ __be32 op_to_lbport;
+ __be32 retval_len16;
+ union fw_port_lb_stats {
+ struct fw_port_lb_stats_ctl {
+ u8 nstats_bg_bm;
+ u8 ix_pkd;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_lb_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 rx_lb_drop;
+ __be64 rx_lb_trunc;
+ } all;
+ } u;
+};
+
+#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
+#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
+#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_pkd;
+ __be32 synmapen_to_hashtoeplitz;
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_ip4udpen;
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
+#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0)
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+enum fw_error_type {
+ FW_ERROR_TYPE_EXCEPTION = 0x0,
+ FW_ERROR_TYPE_HWMODULE = 0x1,
+ FW_ERROR_TYPE_WR = 0x2,
+ FW_ERROR_TYPE_ACL = 0x3,
+};
+
+struct fw_error_cmd {
+ __be32 op_to_type;
+ __be32 len16_pkd;
+ union fw_error {
+ struct fw_error_exception {
+ __be32 info[6];
+ } exception;
+ struct fw_error_hwmodule {
+ __be32 regaddr;
+ __be32 regval;
+ } hwmodule;
+ struct fw_error_wr {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ u8 wrhdr[16];
+ } wr;
+ struct fw_error_acl {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ __be16 mv_pkd;
+ u8 val[6];
+ __be64 r4;
+ } acl;
+ } u;
+};
+
+struct fw_debug_cmd {
+ __be32 op_type;
+#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ u8 filename_0_7[8];
+ u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+struct fw_hdr {
+ u8 ver;
+ u8 reserved1;
+ __be16 len512; /* bin length in units of 512-bytes */
+ __be32 fw_ver; /* firmware version */
+ __be32 tp_microcode_ver;
+ u8 intfver_nic;
+ u8 intfver_vnic;
+ u8 intfver_ofld;
+ u8 intfver_ri;
+ u8 intfver_iscsipdu;
+ u8 intfver_iscsi;
+ u8 intfver_fcoe;
+ u8 reserved2;
+ __be32 reserved3[27];
+};
+
+#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
+#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
+#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
+#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
+#endif /* _T4FW_INTERFACE_H_ */
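
The fw_hdr accessors split the 32-bit firmware version into its major/minor/micro/build bytes. A minimal sketch, assuming the header above is included (helper name illustrative):

/* Sketch: decode the big-endian fw_ver field of a firmware image header. */
static void fw_hdr_version(const struct fw_hdr *hdr,
			   u8 *major, u8 *minor, u8 *micro, u8 *build)
{
	u32 ver = be32_to_cpu(hdr->fw_ver);

	*major = FW_HDR_FW_VER_MAJOR_GET(ver);
	*minor = FW_HDR_FW_VER_MINOR_GET(ver);
	*micro = FW_HDR_FW_VER_MICRO_GET(ver);
	*build = FW_HDR_FW_VER_BUILD_GET(ver);
}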
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 34e03104c3c1..2b8edd2efbf6 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -29,10 +29,6 @@
* PHY layer usage
*/
-/** Pending Items in this driver:
- * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions)
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -62,12 +58,11 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/davinci_emac.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <mach/emac.h>
-
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -465,6 +460,7 @@ struct emac_priv {
void __iomem *ctrl_base;
void __iomem *emac_ctrl_ram;
u32 ctrl_ram_size;
+ u32 hw_ram_addr;
struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
u32 link; /* 1=link on, 0=link off */
@@ -488,6 +484,9 @@ struct emac_priv {
struct mii_bus *mii_bus;
struct phy_device *phydev;
spinlock_t lock;
+ /*platform specific members*/
+ void (*int_enable) (void);
+ void (*int_disable) (void);
};
/* clock frequency for EMAC */
@@ -495,20 +494,12 @@ static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
static unsigned long mdio_max_freq;
-/* EMAC internal utility function */
-static inline u32 emac_virt_to_phys(void __iomem *addr)
-{
- return (u32 __force) io_v2p(addr);
-}
+#define emac_virt_to_phys(addr, priv) \
+ (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
+ + priv->hw_ram_addr)
/* Cache macros - Packet buffers would be from skb pool which is cached */
#define EMAC_VIRT_NOCACHE(addr) (addr)
-#define EMAC_CACHE_INVALIDATE(addr, size) \
- dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE)
-#define EMAC_CACHE_WRITEBACK(addr, size) \
- dma_cache_maint((void *)addr, size, DMA_TO_DEVICE)
-#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
- dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL)
/* DM644x does not have BD's in cached memory - so no cache functions */
#define BD_CACHE_INVALIDATE(addr, size)
@@ -956,19 +947,18 @@ static void emac_dev_mcast_set(struct net_device *ndev)
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
- if (ndev->mc_count > 0) {
+ if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mc_ptr;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- for (mc_ptr = ndev->mc_list; mc_ptr;
- mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *)mc_ptr->dmi_addr);
+ (u8 *) mc_ptr->dmi_addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1002,6 +992,8 @@ static void emac_int_disable(struct emac_priv *priv)
emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
/* NOTE: Rx Threshold and Misc interrupts are not disabled */
+ if (priv->int_disable)
+ priv->int_disable();
local_irq_restore(flags);
@@ -1021,6 +1013,9 @@ static void emac_int_disable(struct emac_priv *priv)
static void emac_int_enable(struct emac_priv *priv)
{
if (priv->version == EMAC_VERSION_2) {
+ if (priv->int_enable)
+ priv->int_enable();
+
emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
@@ -1230,6 +1225,10 @@ static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
if (1 == txch->queue_active) {
curr_bd = txch->active_queue_head;
while (curr_bd != NULL) {
+ dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+ curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+ DMA_TO_DEVICE);
+
emac_net_tx_complete(priv, (void __force *)
&curr_bd->buf_token, 1, ch);
if (curr_bd != txch->active_queue_tail)
@@ -1302,7 +1301,7 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
curr_bd = txch->active_queue_head;
if (NULL == curr_bd) {
emac_write(EMAC_TXCP(ch),
- emac_virt_to_phys(txch->last_hw_bdprocessed));
+ emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
txch->no_active_pkts++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
return 0;
@@ -1312,7 +1311,7 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
while ((curr_bd) &&
((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
(pkts_processed < budget)) {
- emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
txch->active_queue_head = curr_bd->next;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
if (curr_bd->next) { /* misqueued packet */
@@ -1322,6 +1321,11 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
txch->queue_active = 0; /* end of queue */
}
}
+
+ dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+ curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+ DMA_TO_DEVICE);
+
*tx_complete_ptr = (u32) curr_bd->buf_token;
++tx_complete_ptr;
++tx_complete_cnt;
@@ -1382,8 +1386,8 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
txch->bd_pool_head = curr_bd->next;
curr_bd->buf_token = buf_list->buf_token;
- /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
- curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
+ curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
+ buf_list->length, DMA_TO_DEVICE);
curr_bd->off_b_len = buf_list->length;
curr_bd->h_next = 0;
curr_bd->next = NULL;
@@ -1399,7 +1403,7 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
txch->active_queue_tail = curr_bd;
if (1 != txch->queue_active) {
emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
txch->queue_active = 1;
}
++txch->queue_reinit;
@@ -1411,10 +1415,11 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
tail_bd->next = curr_bd;
txch->active_queue_tail = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
+ tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_TXHDP(ch),
+ emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++txch->end_of_queue_add;
@@ -1462,7 +1467,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
- EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len);
ndev->trans_start = jiffies;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
if (unlikely(ret_code != 0)) {
@@ -1537,7 +1541,6 @@ static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
p_skb->dev = ndev;
skb_reserve(p_skb, NET_IP_ALIGN);
*data_token = (void *) p_skb;
- EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size);
return p_skb->data;
}
@@ -1604,9 +1607,10 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
}
/* populate the hardware descriptor */
- curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
- /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
- curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
+ curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
+ priv);
+ curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
+ rxch->buf_size, DMA_FROM_DEVICE);
curr_bd->off_b_len = rxch->buf_size;
curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
@@ -1690,6 +1694,12 @@ static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
curr_bd = rxch->active_queue_head;
while (curr_bd) {
if (curr_bd->buf_token) {
+ dma_unmap_single(&priv->ndev->dev,
+ curr_bd->buff_ptr,
+ curr_bd->off_b_len
+ & EMAC_RX_BD_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
dev_kfree_skb_any((struct sk_buff *)\
curr_bd->buf_token);
}
@@ -1864,8 +1874,8 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
/* populate the hardware descriptor */
curr_bd->h_next = 0;
- /* FIXME buff_ptr = dma_map_single(... buffer ...) */
- curr_bd->buff_ptr = virt_to_phys(buffer);
+ curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
+ rxch->buf_size, DMA_FROM_DEVICE);
curr_bd->off_b_len = rxch->buf_size;
curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
curr_bd->next = NULL;
@@ -1879,7 +1889,7 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
rxch->active_queue_tail = curr_bd;
if (0 != rxch->queue_active) {
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head));
+ emac_virt_to_phys(rxch->active_queue_head, priv));
rxch->queue_active = 1;
}
} else {
@@ -1890,11 +1900,11 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
rxch->active_queue_tail = curr_bd;
tail_bd->next = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = emac_virt_to_phys(curr_bd);
+ tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++rxch->end_of_queue_add;
@@ -1920,7 +1930,6 @@ static int emac_net_rx_cb(struct emac_priv *priv,
p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
/* set length of packet */
skb_put(p_skb, net_pkt_list->pkt_length);
- EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len);
p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
netif_receive_skb(p_skb);
priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
@@ -1983,11 +1992,16 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
rx_buf_obj->buf_token = curr_bd->buf_token;
+
+ dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
+ curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
curr_pkt->num_bufs = 1;
curr_pkt->pkt_length =
(frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
- emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
++rxch->processed_bd;
last_bd = curr_bd;
curr_bd = last_bd->next;
@@ -1998,7 +2012,7 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
if (curr_bd) {
++rxch->mis_queued_packets;
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
} else {
++rxch->end_of_queue;
rxch->queue_active = 0;
@@ -2099,7 +2113,7 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXINTMASKSET, BIT(ch));
rxch->queue_active = 1;
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head));
+ emac_virt_to_phys(rxch->active_queue_head, priv));
}
/* Enable MII */
@@ -2378,7 +2392,7 @@ static int emac_dev_open(struct net_device *ndev)
struct emac_priv *priv = netdev_priv(ndev);
netif_carrier_off(ndev);
- for (cnt = 0; cnt <= ETH_ALEN; cnt++)
+ for (cnt = 0; cnt < ETH_ALEN; cnt++)
ndev->dev_addr[cnt] = priv->mac_addr[cnt];
/* Configuration items */
@@ -2651,7 +2665,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
- printk(KERN_ERR "DaVinci EMAC: No platfrom data\n");
+ printk(KERN_ERR "DaVinci EMAC: No platform data\n");
return -ENODEV;
}
@@ -2660,6 +2674,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->phy_mask = pdata->phy_mask;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
+ priv->int_enable = pdata->interrupt_enable;
+ priv->int_disable = pdata->interrupt_disable;
+
emac_dev = &ndev->dev;
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2672,8 +2689,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
- for regs\n");
+ dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
@@ -2692,6 +2708,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ctrl_ram_size = pdata->ctrl_ram_size;
priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
+ if (pdata->hw_ram_addr)
+ priv->hw_ram_addr = pdata->hw_ram_addr;
+ else
+ priv->hw_ram_addr = (u32 __force)res->start +
+ pdata->ctrl_ram_offset;
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
@@ -2711,6 +2733,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ clk_enable(emac_clk);
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
@@ -2720,7 +2744,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
goto netdev_reg_err;
}
- clk_enable(emac_clk);
/* MII/Phy intialisation, mdio bus registration */
emac_mii = mdiobus_alloc();
@@ -2760,6 +2783,7 @@ mdiobus_quit:
netdev_reg_err:
mdio_alloc_err:
+ clk_disable(emac_clk);
no_irq_res:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
@@ -2803,31 +2827,37 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
return 0;
}
-static
-int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
+static int davinci_emac_suspend(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
- if (netif_running(dev))
- emac_dev_stop(dev);
+ if (netif_running(ndev))
+ emac_dev_stop(ndev);
clk_disable(emac_clk);
return 0;
}
-static int davinci_emac_resume(struct platform_device *pdev)
+static int davinci_emac_resume(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
clk_enable(emac_clk);
- if (netif_running(dev))
- emac_dev_open(dev);
+ if (netif_running(ndev))
+ emac_dev_open(ndev);
return 0;
}
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+ .suspend = davinci_emac_suspend,
+ .resume = davinci_emac_resume,
+};
+
/**
* davinci_emac_driver: EMAC platform driver structure
*/
@@ -2835,11 +2865,10 @@ static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
.owner = THIS_MODULE,
+ .pm = &davinci_emac_pm_ops,
},
.probe = davinci_emac_probe,
.remove = __devexit_p(davinci_emac_remove),
- .suspend = davinci_emac_suspend,
- .resume = davinci_emac_resume,
};
/**
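
The key change in the davinci_emac hunks above is the descriptor address translation: instead of io_v2p(), a buffer descriptor's device-visible address is now its offset within the ioremapped CPPI RAM plus hw_ram_addr (taken from platform data, or derived from the memory resource when none is supplied). Restated outside the macro purely for readability, as a sketch of what emac_virt_to_phys(addr, priv) computes:

/* Sketch: device-visible address of a buffer descriptor in CPPI RAM. */
static u32 emac_bd_hw_addr(struct emac_priv *priv, void __iomem *bd)
{
	u32 offset = (u32 __force)bd - (u32 __force)priv->emac_ctrl_ram;

	return priv->hw_ram_addr + offset;
}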
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 45794f6cb0f6..a0a6830b5e6d 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -464,7 +464,7 @@ static int de620_close(struct net_device *dev)
static void de620_set_multicast_list(struct net_device *dev)
{
- if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index be9590253aa1..8cf3cc6f20e2 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -940,9 +940,8 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -960,9 +959,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a3..ed53a8d45f4e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
- "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "" : "I/O ",
- (long long)bar_start, dev->irq,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ (long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
@@ -2230,7 +2227,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
* perfect filtering will be used.
*/
- if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
@@ -2238,17 +2235,16 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
- bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
- dmi = dev->mc_list; /* point to first multicast addr */
- for (i=0; i < bp->mc_count; i++)
- {
- memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
- dmi = dmi->next; /* point to next multicast addr */
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
+ dmi->dmi_addr, FDDI_K_ALEN);
+
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not update multicast address table!\n", dev->name);
@@ -3631,7 +3627,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);
-static struct pci_device_id dfx_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0c1f491d20bf..744c1928dfca 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,9 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 2a8b6a7c0b87..b05bad829827 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1128,19 +1128,16 @@ set_multicast (struct net_device *dev)
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
- int i;
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist=mclist->next)
- {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit, index = 0;
int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
/* The inverted high significant 6 bits of CRC are
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca8..7caab3d26a9e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
driver_data Data private to the driver.
*/
-static const struct pci_device_id rio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b37730065688..7f9960f718e3 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
+#include <linux/slab.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -724,8 +725,7 @@ static void
dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr = dev->mc_list;
- int mc_cnt = dev->mc_count;
+ struct dev_mc_list *mcptr;
int i, oft;
u32 hash_val;
u16 hash_table[4];
@@ -753,7 +753,7 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d3..791080303db1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,6 +166,7 @@
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
#include <asm/unaligned.h>
@@ -208,7 +209,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
-static struct pci_device_id e100_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
@@ -1537,14 +1538,18 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list = netdev->mc_list;
- u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+ struct dev_mc_list *list;
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
- for (i = 0; list && i < count; i++, list = list->next)
- memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
+ i = 0;
+ netdev_for_each_mc_addr(list, netdev) {
+ if (i == count)
+ break;
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
ETH_ALEN);
+ }
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1552,7 +1557,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
struct nic *nic = netdev_priv(netdev);
DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev->mc_count, netdev->flags);
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1560,7 +1565,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
nic->flags &= ~promiscuous;
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
nic->flags |= multicast_all;
else
nic->flags &= ~multicast_all;
@@ -2261,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work)
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
ioread8(&nic->csr->scb.status));
- e100_down(netdev_priv(netdev));
- e100_up(netdev_priv(netdev));
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ e100_down(netdev_priv(netdev));
+ e100_up(netdev_priv(netdev));
+ }
+ rtnl_unlock();
}
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
@@ -2854,7 +2864,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
}
nic->cbs_pool = pci_pool_create(netdev->name,
nic->pdev,
- nic->params.cbs.count * sizeof(struct cb),
+ nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df3ea71..2f29c2131851 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@ struct e1000_adapter {
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
- unsigned long tx_queue_len;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
@@ -326,6 +325,8 @@ struct e1000_adapter {
/* for ioport free */
int bars;
int need_ioport;
+
+ bool discarding;
};
enum e1000_state_t {
@@ -347,6 +348,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 13e9ece16889..c67e93117271 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -215,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
return 0;
}
+static u32 e1000_get_link(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ adapter->hw.get_link_status = 1;
+
+ return e1000_has_link(adapter);
+}
+
static void e1000_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1892,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = e1000_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
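
The new e1000_get_link() forces a fresh PHY query whenever the carrier is reported down, rather than trusting a cached link state that may be stale while interrupts are off. A toy model of that decision, with every structure and field name invented for illustration:

#include <stdio.h>
#include <stdbool.h>

struct adapter {
	bool carrier_ok;	/* what netif_carrier_ok() would report */
	bool get_link_status;	/* "cache is stale, ask the PHY" flag   */
	bool phy_link;		/* pretend physical link state          */
};

/* stands in for e1000_has_link(): query "hardware" only when asked to */
static bool has_link(struct adapter *a)
{
	if (a->get_link_status) {
		a->get_link_status = false;	/* refreshed from hardware */
		return a->phy_link;
	}
	return a->carrier_ok;			/* otherwise trust the cache */
}

/* mirrors the new ethtool get_link hook */
static bool get_link(struct adapter *a)
{
	if (!a->carrier_ok)
		a->get_link_status = true;	/* don't report stale state */
	return has_link(a);
}

int main(void)
{
	struct adapter a = { false, false, true };	/* link came up unnoticed */

	printf("ethtool sees link: %d\n", get_link(&a));	/* prints 1 */
	return 0;
}
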
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..b15ece26ed84 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
* Macro expands to...
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
adapter->alloc_rx_buf(adapter, ring,
E1000_DESC_UNUSED(ring));
}
-
- adapter->tx_queue_len = netdev->tx_queue_len;
}
int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
adapter->link_speed = 0;
adapter->link_duplex = 0;
netif_carrier_off(netdev);
@@ -847,6 +844,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_pci_reg;
pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case E1000_RXBUFFER_256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -2139,7 +2127,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- if (netdev->uc.count > rar_entries - 1) {
+ if (netdev_uc_count(netdev) > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
@@ -2162,7 +2150,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*/
i = 1;
if (use_uc)
- list_for_each_entry(ha, &netdev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
if (i == rar_entries)
break;
e1000_rar_set(hw, ha->addr, i++);
@@ -2170,29 +2158,25 @@ static void e1000_set_rx_mode(struct net_device *netdev)
WARN_ON(i == rar_entries);
- mc_ptr = netdev->mc_list;
-
- for (; i < rar_entries; i++) {
- if (mc_ptr) {
- e1000_rar_set(hw, mc_ptr->da_addr, i);
- mc_ptr = mc_ptr->next;
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
+ if (i == rar_entries) {
+ /* load any remaining addresses into the hash table */
+ u32 hash_reg, hash_bit, mta;
+ hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+ mta = (1 << hash_bit);
+ mcarray[hash_reg] |= mta;
} else {
- E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
- E1000_WRITE_FLUSH();
- E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
- E1000_WRITE_FLUSH();
+ e1000_rar_set(hw, mc_ptr->da_addr, i++);
}
}
- /* load any remaining addresses into the hash table */
-
- for (; mc_ptr; mc_ptr = mc_ptr->next) {
- u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
- hash_reg = (hash_value >> 5) & 0x7F;
- hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
+ for (; i < rar_entries; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH();
}
/* write the hash table completely, write from bottom to avoid
@@ -2258,7 +2242,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
}
}
-static bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -2329,19 +2313,15 @@ static void e1000_watchdog(unsigned long data)
E1000_CTRL_RFCE) ? "RX" : ((ctrl &
E1000_CTRL_TFCE) ? "TX" : "None" )));
- /* tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = false;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = false;
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
@@ -2802,13 +2782,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
@@ -3176,13 +3156,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */
- if (max_frame <= E1000_RXBUFFER_256)
- adapter->rx_buffer_len = E1000_RXBUFFER_256;
- else if (max_frame <= E1000_RXBUFFER_512)
- adapter->rx_buffer_len = E1000_RXBUFFER_512;
- else if (max_frame <= E1000_RXBUFFER_1024)
- adapter->rx_buffer_len = E1000_RXBUFFER_1024;
- else if (max_frame <= E1000_RXBUFFER_2048)
+ if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3824,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the next
+ * frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->discarding = true;
+
+ if (adapter->discarding) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->discarding = false;
goto next_desc;
}
@@ -4015,11 +3998,21 @@ check_page:
}
}
- if (!buffer_info->dma)
+ if (!buffer_info->dma) {
buffer_info->dma = pci_map_page(pdev,
buffer_info->page, 0,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ put_page(buffer_info->page);
+ dev_kfree_skb(skb);
+ buffer_info->page = NULL;
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ }
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4110,6 +4103,13 @@ map_skb:
skb->data,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
/*
* XXX if it was allocated cleanly it will never map to a
@@ -4605,6 +4605,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (adapter->need_ioport)
err = pci_enable_device(pdev);
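
The rewritten e1000_set_rx_mode() above fills the exact-match RAR slots first and only then spills the remaining multicast addresses into the hash-table bitmap. A standalone sketch of that flow, using a trivial stand-in hash (the real e1000_hash_mc_addr() is hardware-defined) and invented sizes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN	6
#define RAR_ENTRIES	4	/* pretend the NIC has 4 exact-match slots */
#define MTA_WORDS	128	/* 128 x 32-bit multicast table array */

/* trivial stand-in; the real hash is computed by the hardware definition */
static uint32_t toy_hash(const uint8_t *addr)
{
	uint32_t h = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		h = h * 31 + addr[i];
	return h & 0xFFF;	/* 12-bit hash value */
}

int main(void)
{
	uint8_t mc[6][ETH_ALEN] = {
		{ 1, 0, 0, 0, 0, 1 }, { 1, 0, 0, 0, 0, 2 }, { 1, 0, 0, 0, 0, 3 },
		{ 1, 0, 0, 0, 0, 4 }, { 1, 0, 0, 0, 0, 5 }, { 1, 0, 0, 0, 0, 6 },
	};
	uint8_t rar[RAR_ENTRIES][ETH_ALEN] = { { 0 } };
	uint32_t mta[MTA_WORDS] = { 0 };
	unsigned int i = 1, n;	/* slot 0 holds the station address */

	for (n = 0; n < 6; n++) {
		if (i == RAR_ENTRIES) {
			/* no exact-match slot left: fall back to the hash table */
			uint32_t hash = toy_hash(mc[n]);
			uint32_t reg = (hash >> 5) & 0x7F;	/* which 32-bit word */
			uint32_t bit = hash & 0x1F;		/* which bit in it   */

			mta[reg] |= 1u << bit;
		} else {
			memcpy(rar[i++], mc[n], ETH_ALEN);	/* exact-match slot */
		}
	}

	printf("exact-match slots used: %u\n", i);
	for (n = 0; n < MTA_WORDS; n++)
		if (mta[n])
			printf("mta word %u = 0x%08x\n", n, (unsigned int)mta[n]);
	return 0;
}
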
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b979464091bb..90155552ea09 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
@@ -265,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
switch (hw->mac.type) {
+ case e1000_82573:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ break;
case e1000_82574:
case e1000_82583:
+ func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
@@ -328,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
struct pci_dev *pdev = adapter->pdev;
- u16 eeprom_data = 0;
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
@@ -379,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
adapter->flags &= ~FLAG_HAS_WOL;
break;
-
case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ /* Disable ASPM L0s due to hardware errata */
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
+
if (pdev->device == E1000_DEV_ID_82573L) {
- if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
- &eeprom_data) < 0)
- break;
- if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
- adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
- adapter->max_hw_frame_size = DEFAULT_JUMBO;
- }
+ adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
break;
default:
@@ -920,9 +926,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- if (hw->mac.type == e1000_82571 &&
- hw->dev_spec.e82571.alt_mac_addr_is_present)
- e1000e_set_laa_state_82571(hw, true);
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1223,32 +1232,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
- * e1000_update_mc_addr_list_82571 - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
- *
- * Updates the Receive Address Registers and Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
- **/
-static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count)
-{
- if (e1000e_get_laa_state_82571(hw))
- rar_count--;
-
- e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
-}
-
-/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1361,7 +1344,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
*
* 1) down
* 2) autoneg_progress
- * 3) autoneg_complete (the link sucessfully autonegotiated)
+ * 3) autoneg_complete (the link successfully autonegotiated)
* 4) forced_up (the link has been forced up, it did not autonegotiate)
*
**/
@@ -1619,6 +1602,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
}
/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1693,10 +1699,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
- .update_mc_addr_list = e1000_update_mc_addr_list_82571,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
@@ -1704,6 +1711,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
@@ -1782,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
+ .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1799,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
+ .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1810,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
- | FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
- | FLAG_HAS_ERT
| FLAG_HAS_SWSM_ON_LOAD,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed4..e301e26d6897 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -320,6 +320,8 @@
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
@@ -460,6 +462,8 @@
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd9079d53..ee32b9b27a9f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include "hw.h"
@@ -279,7 +280,6 @@ struct e1000_adapter {
struct napi_struct napi;
- unsigned long tx_queue_len;
unsigned int restart_queue;
u32 txd_cmd;
@@ -375,7 +375,7 @@ struct e1000_adapter {
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
- unsigned int flags2;
+ unsigned int flags2;
u32 pba;
u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
@@ -421,6 +421,8 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,9 +460,10 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
@@ -502,6 +505,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -516,9 +521,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count);
+ u32 mc_addr_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -529,6 +532,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
extern void e1000e_update_adaptive(struct e1000_hw *hw);
@@ -582,7 +586,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
@@ -629,7 +632,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm(struct e1000_hw *hw);
-extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
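
The new static inline e1000e_read_mac_addr() simply dispatches through the per-MAC ops table and falls back to the generic routine when no override is installed, which is how the 82571 and es2lan variants below hook in their alternate-address handling. A compact, self-contained illustration of that dispatch-with-fallback pattern (all names invented):

#include <stdio.h>

struct mac_ops {
	int (*read_mac_addr)(void);	/* optional per-MAC override */
};

struct hw {
	struct mac_ops ops;
};

static int read_mac_addr_generic(void)
{
	puts("generic: read MAC from RAR0");
	return 0;
}

static int read_mac_addr_82571(void)
{
	puts("82571: check the alternate NVM address first");
	return read_mac_addr_generic();
}

/* mirrors the new inline wrapper: use the override when one is present */
static int read_mac_addr(struct hw *hw)
{
	if (hw->ops.read_mac_addr)
		return hw->ops.read_mac_addr();
	return read_mac_addr_generic();
}

int main(void)
{
	struct hw plain_hw = { { NULL } };
	struct hw hw_82571 = { { read_mac_addr_82571 } };

	read_mac_addr(&plain_hw);	/* takes the generic path */
	read_mac_addr(&hw_82571);	/* takes the 82571-specific path */
	return 0;
}
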
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..27d21589a69a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
@@ -244,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
break;
}
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
return 0;
}
@@ -812,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- return 0;
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
}
/**
@@ -1338,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1401,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
}
static struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 0aa50c229c79..983493f2330c 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -31,6 +31,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include "e1000.h"
@@ -202,7 +203,7 @@ static u32 e1000_get_link(struct net_device *netdev)
if (!netif_carrier_ok(netdev))
mac->get_link_status = 1;
- return e1000_has_link(adapter);
+ return e1000e_has_link(adapter);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..8bdcd5f24eff 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
#define E1000_FUNC_1 1
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
enum e1000_mac_type {
e1000_82571,
e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
- void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
};
/* Function pointers for the PHY. */
@@ -814,10 +819,15 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
+ bool adaptive_ifs;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -896,7 +906,6 @@ struct e1000_fc_info {
struct e1000_dev_spec_82571 {
bool laa_is_present;
- bool alt_mac_addr_is_present;
u32 smb_counter;
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..8b5e157e9c87 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000e_get_phy_id(hw);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000e_get_phy_type_from_id(phy->id);
switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
+out:
return ret_val;
}
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count--;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* LED operations */
switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
return ret_val;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
if (((hw->phy.type == e1000_phy_82577) &&
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
mdelay(10);
+ /* Perform any necessary post-reset workarounds */
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
@@ -2672,6 +2740,16 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
reg &= ~(1 << 31);
ew32(STATUS, reg);
}
+
+ /*
+ * work-around descriptor data corruption issue during nfs v2 udp
+ * traffic, just disable the nfs filtering capability
+ */
+ reg = er32(RFCTL);
+ reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+ ew32(RFCTL, reg);
+
+ return;
}
/**
@@ -3300,6 +3378,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
.get_link_up_info = e1000_get_link_up_info_ich8lan,
/* led_on dependent on mac type */
/* led_off dependent on mac type */
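
The PHY-init change above retries the PHY id read after switching an 82577 into slow MDIO mode when the first read comes back as 0 or all revision-mask bits. A userspace sketch of that retry flow, with a plain variable standing in for the KMRN mode-control register and a toy id source replacing e1e_rphy()/e1e_wphy():

#include <stdio.h>
#include <stdint.h>

#define PHY_REVISION_MASK	0xFFFFFFF0u
#define MDIO_SLOW_BIT		0x0400u		/* models HV_KMRN_MDIO_SLOW */

static uint16_t kmrn_mode_ctrl;			/* models HV_KMRN_MODE_CTRL */

/* in this toy model the id only reads back once slow mode is enabled */
static uint32_t get_phy_id(void)
{
	return (kmrn_mode_ctrl & MDIO_SLOW_BIT) ? 0x01540050u : 0;
}

int main(void)
{
	uint32_t id = get_phy_id();

	if (id == 0 || id == PHY_REVISION_MASK) {
		kmrn_mode_ctrl |= MDIO_SLOW_BIT;	/* set slow MDIO mode */
		id = get_phy_id();			/* ... and try again  */
	}
	printf("phy id: 0x%08x\n", (unsigned int)id);
	return 0;
}
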
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..a8b2c0de27c4 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
struct e1000_adapter *adapter = hw->adapter;
- u32 status;
- u16 pcie_link_status, pci_header_type, cap_offset;
+ u16 pcie_link_status, cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
PCIE_LINK_WIDTH_SHIFT);
}
- pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
- &pci_header_type);
- if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
- status = er32(STATUS);
- bus->func = (status & E1000_STATUS_FUNC_MASK)
- >> E1000_STATUS_FUNC_SHIFT;
- } else {
- bus->func = 0;
- }
+ mac->ops.set_lan_id(hw);
return 0;
}
/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /*
+ * The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
* e1000_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to the HW structure
*
@@ -125,6 +151,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +160,70 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
- e1e_flush();
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ /* There is no Alternate MAC Address */
+ goto out;
+ }
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
}
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
}
/**
@@ -164,10 +249,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- rar_high |= E1000_RAH_AV;
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
}
/**
@@ -246,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
**/
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
- u32 i;
- u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
- if (!mcarray) {
- printk(KERN_ERR "multicast array memory allocation failed\n");
- return;
- }
-
- /*
- * Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = rar_used_count; i < rar_count; i++) {
- if (mc_addr_count) {
- e1000e_rar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
- e1e_flush();
- }
- }
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /* Load any remaining multicast addresses into the hash table. */
- for (; mc_addr_count > 0; mc_addr_count--) {
- u32 hash_value, hash_reg, hash_bit, mta;
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
- e_dbg("Hash value = 0x%03X\n", hash_value);
+
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
- mc_addr_list += ETH_ALEN;
- }
- /* write the hash table completely */
- for (i = 0; i < hw->mac.mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
e1e_flush();
- kfree(mcarray);
}
/**
@@ -581,7 +647,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
if (!(rxcw & E1000_RXCW_IV)) {
mac->serdes_has_link = true;
e_dbg("SERDES: Link up - autoneg "
- "completed sucessfully.\n");
+ "completed successfully.\n");
} else {
mac->serdes_has_link = false;
e_dbg("SERDES: Link down - invalid"
@@ -1609,6 +1675,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
mac->current_ifs_val = 0;
mac->ifs_min_val = IFS_MIN;
mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1688,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
+out:
+ return;
}
/**
@@ -1630,6 +1703,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
if (mac->tx_packet_delta > MIN_NUM_XMITS) {
mac->in_ifs_mode = true;
@@ -1650,6 +1728,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
+out:
+ return;
}
/**
@@ -2052,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
- * e1000e_read_mac_addr - Read device MAC address
+ * e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
-s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 offset, nvm_data, i;
- u16 mac_addr_offset = 0;
-
- if (hw->mac.type == e1000_82571) {
- /* Check for an alternate MAC address. An alternate MAC
- * address can be setup by pre-boot software and must be
- * treated like a permanent address and must override the
- * actual permanent MAC address.*/
- ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &mac_addr_offset);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (mac_addr_offset == 0xFFFF)
- mac_addr_offset = 0;
-
- if (mac_addr_offset) {
- if (hw->bus.func == E1000_FUNC_1)
- mac_addr_offset += ETH_ALEN/sizeof(u16);
-
- /* make sure we have a valid mac address here
- * before using it */
- ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
- &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (nvm_data & 0x0001)
- mac_addr_offset = 0;
- }
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
- if (mac_addr_offset)
- hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
- }
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
- for (i = 0; i < ETH_ALEN; i += 2) {
- offset = mac_addr_offset + (i >> 1);
- ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
- hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
- }
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
- /* Flip last bit of mac address if we're on second port */
- if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
- hw->mac.perm_addr[5] ^= 1;
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
@@ -2287,10 +2327,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
s32 ret_val, hdr_csum, csum;
u8 i, len;
+ hw->mac.tx_pkt_filtering = true;
+
/* No manageability, no filtering */
if (!e1000e_check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
/*
@@ -2298,9 +2340,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val != 0) {
+ if (ret_val) {
hw->mac.tx_pkt_filtering = false;
- return ret_val;
+ goto out;
}
/* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2361,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
- return 1;
+ goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
- hw->mac.tx_pkt_filtering = true;
- return 1;
+out:
+ return hw->mac.tx_pkt_filtering;
}
/**
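
e1000_check_alt_mac_addr_generic() above unpacks a 6-byte MAC from three little-endian 16-bit NVM words, applies the LAN1 word offset when running as the second function, and ignores the result if the multicast bit is set. A standalone sketch of that unpacking, with an nvm[] array standing in for EEPROM reads and made-up example data:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN		6
#define ALT_MAC_OFFSET_LAN1	3	/* the second port's words follow the first's */

int main(void)
{
	/* words at the alternate-MAC pointer: LAN0 first, then LAN1 */
	uint16_t nvm[] = { 0x2100, 0x4332, 0x6554, 0x2200, 0x4433, 0x6655 };
	unsigned int func1 = 0;		/* set to 1 for the second LAN function */
	unsigned int base = func1 ? ALT_MAC_OFFSET_LAN1 : 0;
	uint8_t mac[ETH_ALEN];
	unsigned int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		uint16_t word = nvm[base + (i >> 1)];

		mac[i]     = (uint8_t)(word & 0xFF);	/* low byte first */
		mac[i + 1] = (uint8_t)(word >> 8);
	}

	if (mac[0] & 0x01) {
		puts("multicast bit set: ignore the alternate address");
		return 0;
	}
	printf("alternate MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
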
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..fb8fc7d1b50d 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -36,6 +36,7 @@
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
@@ -450,13 +451,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- /* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -650,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
i = 0;
}
+ if (i == tx_ring->next_to_use)
+ break;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
@@ -745,10 +758,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* see !EOP comment in other rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full "
"packet\n");
dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -1118,6 +1137,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2272,8 +2292,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
e1000e_config_collision_dist(hw);
-
- adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
@@ -2333,18 +2351,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case 256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -2536,22 +2542,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this. Currently no func pointer
- * exists and all implementations are handled in the generic version of this
- * function.
**/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 rar_used_count,
- u32 rar_count)
+ u32 mc_addr_count)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
}
/**
@@ -2567,7 +2565,6 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
u32 rctl;
@@ -2593,31 +2590,25 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ mc_ptr->dmi_addr, ETH_ALEN);
- e1000_update_mc_addr_list(hw, mta_list, i, 1,
- mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
} else {
/*
* if we're called from probe, we might not have
* anything to do here, so clear out the list
*/
- e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, NULL, 0);
}
}
@@ -2887,7 +2878,6 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3315,24 +3305,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- adapter->stats.scc += phy_data;
+ if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+ adapter->stats.scc += phy_data;
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- adapter->stats.ecol += phy_data;
+ if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+ adapter->stats.ecol += phy_data;
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- adapter->stats.mcc += phy_data;
+ if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+ adapter->stats.mcc += phy_data;
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- adapter->stats.latecol += phy_data;
+ if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+ adapter->stats.latecol += phy_data;
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- adapter->stats.dc += phy_data;
+ if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+ adapter->stats.dc += phy_data;
} else {
adapter->stats.scc += er32(SCC);
adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3350,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- hw->mac.collision_delta = phy_data;
+ if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+ hw->mac.collision_delta = phy_data;
} else {
hw->mac.collision_delta = er32(COLC);
}
@@ -3372,8 +3362,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
- adapter->stats.tncrs += phy_data;
+ if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+ adapter->stats.tncrs += phy_data;
} else {
if ((hw->mac.type != e1000_82574) &&
(hw->mac.type != e1000_82583))
@@ -3477,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -3558,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
- link = e1000_has_link(adapter);
+ link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
e1000e_enable_receives(adapter);
goto link_up;
@@ -3598,21 +3588,15 @@ static void e1000_watchdog_task(struct work_struct *work)
"link gets many collisions.\n");
}
- /*
- * tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor
- */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = 0;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = 0;
- netdev->tx_queue_len = 100;
adapter->tx_timeout_factor = 10;
break;
}
@@ -3781,7 +3765,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3962,13 +3946,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(adapter, buffer_info);
}
@@ -4299,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* 82573 Errata 17 */
+ if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82574)) &&
+ (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
msleep(1);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4317,13 +4309,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* fragmented skbs
*/
- if (max_frame <= 256)
- adapter->rx_buffer_len = 256;
- else if (max_frame <= 512)
- adapter->rx_buffer_len = 512;
- else if (max_frame <= 1024)
- adapter->rx_buffer_len = 1024;
- else if (max_frame <= 2048)
+ if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;
@@ -4627,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
}
}
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
int pos;
- u16 val;
+ u16 reg16;
/*
- * 82573 workaround - disable L1 ASPM on mobile chipsets
- *
- * L1 ASPM on various mobile (ich7) chipsets do not behave properly
- * resulting in lost data or garbage information on the pci-e link
- * level. This could result in (false) bad EEPROM checksum errors,
- * long ping times (up to 2s) or even a system freeze/hang.
- *
- * Unfortunately this feature saves about 1W power consumption when
- * active.
+ * Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
*/
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
- if (val & 0x2) {
- dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
- val &= ~0x2;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
- }
+ pos = pci_pcie_cap(pdev);
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+ pos = pci_pcie_cap(pdev->bus->self);
+ pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+ (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+ __e1000e_disable_aspm(pdev, state);
}
#ifdef CONFIG_PM
@@ -4674,7 +4670,9 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- e1000e_disable_l1aspm(pdev);
+ pci_save_state(pdev);
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4816,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
int err;
pci_ers_result_t result;
- e1000e_disable_l1aspm(pdev);
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
@@ -4825,6 +4824,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4909,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
}
-
- ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
- if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
- /* ASPM enable */
- dev_warn(&adapter->pdev->dev,
- "Warning: detected ASPM enabled in EEPROM\n");
- }
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -4964,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- e1000e_disable_l1aspm(pdev);
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+ e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err)
@@ -5133,7 +5127,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_eeprom_checks(adapter);
- /* copy the MAC address out of the NVM */
+ /* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
e_err("NVM Read Error while reading MAC address\n");
@@ -5325,7 +5319,7 @@ static struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
- phy->ops.release(hw);
- }
-
return ret_val;
}
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
}
/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = 0;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 94c59498cdb6..27c7bdbfa003 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -137,7 +137,6 @@ static const char version[] =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
@@ -1287,9 +1286,10 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
+ int mc_count = netdev_mc_count(dev);
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1299,7 +1299,7 @@ set_multicast_list(struct net_device *dev)
eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
}
- else if (dev->mc_count==0 )
+ else if (mc_count == 0)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1329,12 +1329,10 @@ set_multicast_list(struct net_device *dev)
outw(MC_SETUP, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
- outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- for (i = 0; i < dev->mc_count; i++)
- {
- eaddrs=(unsigned short *)dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ eaddrs = (unsigned short *) dmi->dmi_addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
@@ -1348,7 +1346,7 @@ set_multicast_list(struct net_device *dev)
outb(MC_SETUP, ioaddr);
/* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
if (lp->tx_start != lp->tx_end)
{
@@ -1380,8 +1378,8 @@ set_multicast_list(struct net_device *dev)
break;
} else if ((i & 0x0f) == 0x03) { /* MC-Done */
printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, dev->mc_count,
- dev->mc_count > 1 ? "es":"");
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
break;
}
}
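A toy, userspace-only model of the conversion pattern repeated throughout the hunks in this series: instead of open-coding "for (i = 0, dmi = dev->mc_list; ...)" the drivers now call netdev_for_each_mc_addr(). At this point in the tree the helper is assumed to be little more than a walk of the mc_list chain, as modelled below with made-up struct and macro names.

#include <stdio.h>

struct toy_mc_list {
	unsigned char addr[6];
	struct toy_mc_list *next;
};

struct toy_net_device {
	struct toy_mc_list *mc_list;
	int mc_count;
};

#define toy_for_each_mc_addr(mc, dev) \
	for ((mc) = (dev)->mc_list; (mc); (mc) = (mc)->next)

int main(void)
{
	struct toy_mc_list b = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, NULL };
	struct toy_mc_list a = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, &b };
	struct toy_net_device dev = { &a, 2 };
	struct toy_mc_list *mc;

	toy_for_each_mc_addr(mc, &dev)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mc->addr[0], mc->addr[1], mc->addr[2],
		       mc->addr[3], mc->addr[4], mc->addr[5]);
	return 0;
}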
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 6fbfc8eee632..1a7322b80ea7 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -111,7 +111,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/mca-legacy.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
@@ -1578,7 +1577,7 @@ static void eexp_setup_filter(struct net_device *dev)
{
struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
- int count = dev->mc_count;
+ int count = netdev_mc_count(dev);
int i;
if (count > 8) {
printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
@@ -1588,23 +1587,19 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) {
- unsigned short *data;
- if (!dmi) {
- printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ unsigned short *data = (unsigned short *) dmi->dmi_addr;
+
+ if (i == count)
break;
- }
- if (dmi->dmi_addrlen != ETH_ALEN) {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- data = (unsigned short *)dmi->dmi_addr;
outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
+ i++;
}
}
@@ -1627,9 +1622,9 @@ eexp_set_multicast(struct net_device *dev)
}
if (!(dev->flags & IFF_PROMISC)) {
eexp_setup_filter(dev);
- if (lp->old_mc_count != dev->mc_count) {
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
kick = 1;
- lp->old_mc_count = dev->mc_count;
+ lp->old_mc_count = netdev_mc_count(dev);
}
}
if (kick) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 7b62336e6736..809ccc9ff09c 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -32,6 +32,7 @@
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
@@ -1967,7 +1968,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct dev_mc_list *k_mcl_entry;
- int ret, i;
+ int ret;
if (dev->flags & IFF_PROMISC) {
ehea_promiscuous(dev, 1);
@@ -1981,7 +1982,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
@@ -1990,15 +1991,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
- k_mcl_entry = k_mcl_entry->next)
+ netdev_for_each_mc_addr(k_mcl_entry, dev)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 18d405f78c0f..a1b4c7e56367 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -27,6 +27,7 @@
*/
#include <linux/mm.h>
+#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 66813c91a720..ff27f728fd9d 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -1413,7 +1412,7 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (netif_msg_link(priv))
dev_info(&dev->dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
- } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
dev_info(&dev->dev, "%smulticast mode\n",
(dev->flags & IFF_ALLMULTI) ? "all-" : "");
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228ba..ee01f5a6d0d4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.100"
+#define DRV_VERSION "1.1.0.241a"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -89,9 +89,12 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int flags;
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15e..cf098bb636b8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
/* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ 0, } /* end of table */
};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
enic->msg_enable = value;
}
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+
+ tx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->tx_coalesce_usecs);
+ rx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->rx_coalesce_usecs);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
+ INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = enic_set_tso,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
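A standalone sketch (not driver code) of the two rules the enic_set_coalesce() added earlier in this file enforces: requested values are clamped to what the hardware timer can represent, and the legacy INTx/MSI modes must use a single value for both TX and RX, while MSI-X may program them independently. VNIC_INTR_TIMER_MAX below is an assumed placeholder, not taken from the driver headers.

#include <stdio.h>
#include <errno.h>

#define VNIC_INTR_TIMER_MAX		0xffffu		/* assumed */
#define INTR_COALESCE_USEC_TO_HW(usec)	((usec) * 2 / 3)
#define INTR_COALESCE_HW_TO_USEC(hw)	((hw) * 3 / 2)

static unsigned int clamp_usecs(unsigned int usecs)
{
	unsigned int max = INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX);

	return usecs < max ? usecs : max;
}

/* per_queue: 1 = separate TX/RX timers (MSI-X), 0 = shared timer (INTx/MSI) */
static int set_coalesce(unsigned int tx, unsigned int rx, int per_queue)
{
	tx = clamp_usecs(tx);
	rx = clamp_usecs(rx);

	if (!per_queue && tx != rx)
		return -EINVAL;

	printf("program hw timers: tx=%u us rx=%u us (hw units %u/%u)\n",
	       tx, rx, INTR_COALESCE_USEC_TO_HW(tx),
	       INTR_COALESCE_USEC_TO_HW(rx));
	return 0;
}

int main(void)
{
	printf("INTx, tx==rx: %d\n", set_coalesce(30, 30, 0));	/* accepted */
	printf("INTx, tx!=rx: %d\n", set_coalesce(30, 60, 0));	/* -EINVAL  */
	printf("MSI-X, split: %d\n", set_coalesce(30, 60, 1));	/* accepted */
	return 0;
}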
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
u32 mtu = vnic_dev_mtu(enic->vdev);
if (mtu && mtu != enic->port_mtu) {
+ enic->port_mtu = mtu;
if (mtu < enic->netdev->mtu)
printk(KERN_WARNING PFX
"%s: interface MTU (%d) set higher "
"than switch port MTU (%d)\n",
enic->netdev->name, enic->netdev->mtu, mtu);
- enic->port_mtu = mtu;
}
}
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq = &enic->wq[0];
@@ -764,15 +822,16 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list = netdev->mc_list;
+ struct dev_mc_list *list;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+ unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
spin_lock(&enic->devcmd_lock);
- vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
/* Is there an easier way? Trying to minimize to
* calls to add/del multicast addrs. We keep the
@@ -789,9 +851,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
* look for changes to add/del.
*/
- for (i = 0; list && i < mc_count; i++) {
- memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
- list = list->next;
+ i = 0;
+ netdev_for_each_mc_addr(list, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -1084,34 +1148,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_rq_drop_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb = buf->os_buf;
-
- if (skipped)
- return;
-
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_any(skb);
-}
-
-static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_drop_buf, opaque);
-
- return 0;
-}
-
static int enic_poll(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1155,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int work_done, rq_work_done, wq_work_done;
+ int err;
/* Service RQ (first) and WQ
*/
@@ -1142,16 +1179,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- if (rq_work_done > 0) {
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
- /* Replenish RQ
- */
+ /* Buffer allocation failed. Stay in polling
+ * mode so we can try to fill the ring again.
+ */
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ if (err)
+ rq_work_done = rq_work_to_do;
- } else {
+ if (rq_work_done < rq_work_to_do) {
- /* If no work done, flush all LROs and exit polling
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1210,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
struct net_device *netdev = enic->netdev;
unsigned int work_to_do = budget;
unsigned int work_done;
+ int err;
/* Service RQ
*/
@@ -1177,25 +1218,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
work_to_do, enic_rq_service, NULL);
- if (work_done > 0) {
-
- /* Replenish RQ
- */
-
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
- /* Return intr event credits for this polling
- * cycle. An intr event is the completion of a
- * RQ packet.
- */
+ /* Return intr event credits for this polling
+ * cycle. An intr event is the completion of a
+ * RQ packet.
+ */
+ if (work_done > 0)
vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
work_done,
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- } else {
- /* If no work done, flush all LROs and exit polling
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling mode
+ * so we can try to fill the ring again.
+ */
+
+ if (err)
+ work_done = work_to_do;
+
+ if (work_done < work_to_do) {
+
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
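A toy model, in plain C with no kernel APIs, of the polling rule the two hunks above switch to: if refilling the receive ring fails, the poll routine reports that it consumed its whole budget so NAPI keeps polling and the refill can be retried; only when work_done stays below the budget does it flush LRO, complete and re-enable interrupts.

#include <stdio.h>

static unsigned int poll_once(unsigned int work_done, unsigned int budget,
			      int refill_failed)
{
	if (refill_failed)
		work_done = budget;	/* pretend budget exhausted, stay polled */

	if (work_done < budget)
		printf("work_done=%2u < budget=%u -> exit polling, unmask irq\n",
		       work_done, budget);
	else
		printf("work_done=%2u >= budget=%u -> stay in polling mode\n",
		       work_done, budget);
	return work_done;
}

int main(void)
{
	poll_once(10, 64, 0);	/* normal: complete polling */
	poll_once(10, 64, 1);	/* allocation failed: keep polling */
	poll_once(64, 64, 0);	/* ring still busy: keep polling */
	return 0;
}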
@@ -1304,6 +1350,24 @@ static int enic_request_intr(struct enic *enic)
return err;
}
+static void enic_synchronize_irqs(struct enic *enic)
+{
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ synchronize_irq(enic->pdev->irq);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->intr_count; i++)
+ synchronize_irq(enic->msix_entry[i].vector);
+ break;
+ default:
+ break;
+ }
+}
+
static int enic_notify_set(struct enic *enic)
{
int err;
@@ -1360,11 +1424,13 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
- if (err) {
+ vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n",
netdev->name);
+ err = -ENOMEM;
goto err_out_notify_unset;
}
}
@@ -1409,16 +1475,19 @@ static int enic_stop(struct net_device *netdev)
unsigned int i;
int err;
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_mask(&enic->intr[i]);
+
+ enic_synchronize_irqs(enic);
+
del_timer_sync(&enic->notify_timer);
spin_lock(&enic->devcmd_lock);
vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
napi_disable(&enic->napi);
- netif_stop_queue(netdev);
-
- for (i = 0; i < enic->intr_count; i++)
- vnic_intr_mask(&enic->intr[i]);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1505,6 @@ static int enic_stop(struct net_device *netdev)
spin_unlock(&enic->devcmd_lock);
enic_free_intr(enic);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
- -1, enic_rq_service_drop, NULL);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
- -1, enic_wq_service, NULL);
-
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1826,8 @@ int enic_dev_init(struct enic *enic)
err = enic_set_intr_mode(enic);
if (err) {
printk(KERN_ERR PFX
- "Failed to set intr mode, aborting.\n");
+ "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting.\n");
return err;
}
@@ -1986,6 +2051,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_dev_deinit;
}
+ enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc9..02839bf0fe8b 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
- GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
- c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
- c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
max_t(u16, ENIC_MIN_MTU,
c->mtu));
- c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_usec = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ c->intr_timer_usec);
printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
- "intr timer %d\n",
+ "intr timer %d usec\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
- ENIC_SETTING(enic, LRO), c->intr_timer);
+ ENIC_SETTING(enic, LRO), c->intr_timer_usec);
return 0;
}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
- enic->config.intr_timer,
+ INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
enic->config.intr_timer_type,
mask_on_assertion);
}
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d3..cf22de71014e 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
+#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
@@ -36,7 +37,6 @@ struct vnic_res {
};
#define VNIC_DEV_CAP_INIT 0x0001
-#define VNIC_DEV_CAP_PERBI 0x0002
struct vnic_dev {
void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b8..8eeb6758491b 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
+
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
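The two macros added above convert between microseconds and the hardware's 1.5 us timer ticks using integer arithmetic, so a round trip can round down by a microsecond or so. A quick standalone check (macros reproduced from the hunk, with the second parameter renamed hw for readability):

#include <stdio.h>

#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3)
#define INTR_COALESCE_HW_TO_USEC(hw)   ((hw) * 3 / 2)

int main(void)
{
	unsigned int usecs[] = { 3, 30, 100, 125 };

	for (unsigned int i = 0; i < sizeof(usecs) / sizeof(usecs[0]); i++) {
		unsigned int hw = INTR_COALESCE_USEC_TO_HW(usecs[i]);

		printf("%3u us -> %3u ticks -> %3u us\n",
		       usecs[i], hw, INTR_COALESCE_HW_TO_USEC(hw));
	}
	return 0;
}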
@@ -30,6 +34,7 @@ struct vnic_enet_config {
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
+ u32 intr_timer_usec;
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195e..3934309a9498 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
- iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+}
+
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce6..2fe6c6339e3c 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
+ (void)ioread32(&intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d8..cf80ab46d582 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
-#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index 75583978a5e5..e186efaf9da1 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d2e00e51b7b5..d5f984357f5c 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec8..7a567201e829 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -73,7 +73,6 @@ static int rx_copybreak;
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -167,7 +166,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
};
-static struct pci_device_id epic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
@@ -1390,21 +1389,20 @@ static void set_rx_mode(struct net_device *dev)
outl(0x002C, ioaddr + RxCtrl);
/* Unconditionally log net taps. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
/* There is apparently a chip bug, so the multicast filter
is never enabled. */
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outl(0x000C, ioaddr + RxCtrl);
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit_nr =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index f5b96cadeb25..b34a2ddeef4c 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -115,6 +115,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 71bfeec33a0b..d4e24f08b3ba 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -152,7 +152,6 @@ static char *version =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -1359,7 +1358,7 @@ static void eth16i_multicast(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1b..a8d92503226e 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -18,6 +18,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <net/ethoc.h>
static int buffer_size = 0x8000; /* 32 KBytes */
@@ -755,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc = NULL;
+ struct dev_mc_list *mc;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -783,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- for (mc = dev->mc_list; mc; mc = mc->next) {
- u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
+ netdev_for_each_mc_addr(mc, dev) {
+ u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -904,7 +905,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
mmio = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mmio) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
@@ -917,7 +918,7 @@ static int ethoc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
mem = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mem) {
dev_err(&pdev->dev, "cannot request memory space\n");
ret = -ENXIO;
@@ -945,7 +946,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = 0;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
- mmio->end - mmio->start + 1);
+ resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
@@ -954,7 +955,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (netdev->mem_end) {
priv->membase = devm_ioremap_nocache(&pdev->dev,
- netdev->mem_start, mem->end - mem->start + 1);
+ netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index dd4ba01fd92d..91e59f3a9d6d 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,9 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589e..d11ae5197f01 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -74,7 +74,6 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -1786,18 +1785,16 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit;
bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
@@ -1941,7 +1938,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static struct pci_device_id fealnx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 16a1d58419d9..9f98c1c4a344 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1128,6 +1128,26 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1137,6 +1157,7 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
NULL
};
@@ -1554,7 +1575,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct dev_mc_list *dmi;
- unsigned int i, j, bit, data, crc, tmp;
+ unsigned int i, bit, data, crc, tmp;
unsigned char hash;
if (dev->flags & IFF_PROMISC) {
@@ -1583,9 +1604,7 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- dmi = dev->mc_list;
-
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
/* Only support group multicast for now */
if (!(dmi->dmi_addr[0] & 1))
continue;
@@ -1658,6 +1677,7 @@ static int fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
int i;
/* Allocate memory for buffer descriptors. */
@@ -1710,6 +1730,34 @@ static int fec_enet_init(struct net_device *dev, int index)
/* Set MII speed to 2.5 MHz */
fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
/ 2500000) / 2) & 0x3F) << 1;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
fec_restart(dev, 0);
/* Queue up command to detect the PHY and initialize the
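A minimal, userspace-only sketch of the descriptor-ring setup the hunk above moves from fec_restart() into fec_enet_init(): every descriptor is cleared and the last one gets the WRAP bit so the hardware loops back to the start of the ring. The BD_SC_WRAP value here is assumed for illustration, not taken from the FEC headers, and the struct is a toy stand-in for struct bufdesc.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE  8
#define BD_SC_WRAP 0x2000u		/* assumed bit position */

struct toy_bufdesc {
	uint16_t cbd_sc;		/* control/status */
	uint32_t cbd_bufaddr;		/* buffer address */
};

int main(void)
{
	struct toy_bufdesc ring[RING_SIZE];
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].cbd_sc = 0;
		ring[i].cbd_bufaddr = 0;
	}
	ring[RING_SIZE - 1].cbd_sc |= BD_SC_WRAP;	/* last BD wraps */

	for (i = 0; i < RING_SIZE; i++)
		printf("bd[%d].cbd_sc = 0x%04x\n", i, ring[i].cbd_sc);
	return 0;
}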
@@ -1730,7 +1778,6 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct bufdesc *bdp;
int i;
/* Whack a reset. We should wait for this. */
@@ -1768,33 +1815,6 @@ fec_restart(struct net_device *dev, int duplex)
}
}
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
/* Enable MII mode */
if (duplex) {
/* MII enable / FD enable */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 848e8407ea8f..4a43e56b7394 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -575,19 +576,16 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- int i;
struct dev_mc_list *dmi;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- dmi = dev->mc_list;
- for (i=0; i<dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
gaddr2 |= 1 << crc;
- dmi = dmi->next;
}
out_be32(&fec->gaddr1, gaddr1);
out_be32(&fec->gaddr2, gaddr2);
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index ee0f3c6d3f88..7658a082e390 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804a..5c98f7c22425 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -59,6 +59,7 @@
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -3095,7 +3096,7 @@ static void nv_set_multicast(struct net_device *dev)
} else {
pff |= NVREG_PFF_MYADDR;
- if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
u32 alwaysOff[2];
u32 alwaysOn[2];
@@ -3105,8 +3106,7 @@ static void nv_set_multicast(struct net_device *dev)
} else {
struct dev_mc_list *walk;
- walk = dev->mc_list;
- while (walk != NULL) {
+ netdev_for_each_mc_addr(walk, dev) {
u32 a, b;
a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
@@ -3114,7 +3114,6 @@ static void nv_set_multicast(struct net_device *dev)
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
alwaysOff[1] &= ~b;
- walk = walk->next;
}
}
addr[0] = alwaysOn[0];
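A toy recreation of what the rewritten forcedeth loop above computes: across the whole multicast list it keeps only the bits set in every address (alwaysOn) and the bits clear in every address (alwaysOff), which is what the hardware address/mask filter needs. Addresses are folded into a single 32-bit word here purely to keep the example short.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addrs[] = { 0x01005e01, 0x01005e7f, 0x01005e23 };
	uint32_t always_on = 0xffffffff, always_off = 0xffffffff;

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		always_on  &= addrs[i];		/* bits set in every address */
		always_off &= ~addrs[i];	/* bits clear in every address */
	}

	printf("alwaysOn  = 0x%08x\n", always_on);
	printf("alwaysOff = 0x%08x\n", always_off);
	return 0;
}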
@@ -5900,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
- if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
+ if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
@@ -6198,7 +6197,7 @@ static void nv_shutdown(struct pci_dev *pdev)
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 562ea68ed99b..fc073b5a38c7 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -1,9 +1,13 @@
config FS_ENET
tristate "Freescale Ethernet Driver"
- depends on CPM1 || CPM2
+ depends on CPM1 || CPM2 || PPC_MPC512x
select MII
select PHYLIB
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
config FS_ENET_HAS_SCC
bool "Chip has an SCC usable for ethernet"
depends on FS_ENET && (CPM1 || CPM2)
@@ -16,13 +20,13 @@ config FS_ENET_HAS_FCC
config FS_ENET_HAS_FEC
bool "Chip has an FEC usable for ethernet"
- depends on FS_ENET && CPM1
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
select FS_ENET_MDIO_FEC
default y
config FS_ENET_MDIO_FEC
tristate "MDIO driver for FEC"
- depends on FS_ENET && CPM1
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
config FS_ENET_MDIO_FCC
tristate "MDIO driver for FCC"
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ec2f5034457f..0770e2f6da6b 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -108,9 +108,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s rcv is not +last\n",
- dev->name);
+ dev_warn(fep->dev, "rcv is not +last\n");
/*
* Check for errors.
@@ -178,9 +176,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, dropping packet.\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -242,9 +239,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s rcv is not +last\n",
- dev->name);
+ dev_warn(fep->dev, "rcv is not +last\n");
/*
* Check for errors.
@@ -313,9 +308,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
received++;
netif_rx(skb);
} else {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, dropping packet.\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -388,10 +382,10 @@ static void fs_enet_tx(struct net_device *dev)
} else
fep->stats.tx_packets++;
- if (sc & BD_ENET_TX_READY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s HEY! Enet xmit interrupt and TX_READY.\n",
- dev->name);
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
/*
* Deferred means some collisions occurred during transmit,
@@ -511,9 +505,8 @@ void fs_init_bds(struct net_device *dev)
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = dev_alloc_skb(ENET_RX_FRSIZE);
if (skb == NULL) {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, unable to allocate skb\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, unable to allocate skb\n");
break;
}
skb_align(skb, ENET_RX_ALIGN);
@@ -587,6 +580,40 @@ void fs_cleanup_bds(struct net_device *dev)
/**********************************************************************************/
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+/*
+ * MPC5121 FEC requires 4-byte alignment for TX data buffers!
+ */
+static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ /* Alloc new skb */
+ new_skb = dev_alloc_skb(skb->len + 4);
+ if (!new_skb) {
+ if (net_ratelimit()) {
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping tx packet.\n");
+ }
+ return NULL;
+ }
+
+ /* Make sure new skb is properly aligned */
+ skb_align(new_skb, 4);
+
+ /* Copy data to new skb ... */
+ skb_copy_from_linear_data(skb, new_skb->data, skb->len);
+ skb_put(new_skb, skb->len);
+
+ /* ... and free an old one */
+ dev_kfree_skb_any(skb);
+
+ return new_skb;
+}
+#endif
+
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
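A userspace sketch of the alignment test used by the MPC5121 workaround added above (and applied in the next hunk): data whose address has either of the two low bits set is copied into a freshly allocated, suitably aligned buffer before being handed to the hardware. malloc() stands in for dev_alloc_skb() purely for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *align_for_tx(const void *data, size_t len)
{
	if (((unsigned long)data & 0x3) == 0)
		return (void *)data;		/* already 4-byte aligned */

	void *copy = malloc(len + 4);		/* malloc returns aligned memory */
	if (!copy)
		return NULL;			/* caller retries later */
	memcpy(copy, data, len);
	return copy;
}

int main(void)
{
	unsigned int words[8];
	const char *unaligned = (const char *)words + 1;	/* deliberately misaligned */

	void *tx = align_for_tx(unaligned, 16);
	printf("original %p, to-transmit %p (%s)\n",
	       (const void *)unaligned, tx,
	       tx == (void *)unaligned ? "reused" : "copied");
	if (tx && tx != (void *)unaligned)
		free(tx);
	return 0;
}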
@@ -595,6 +622,19 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 sc;
unsigned long flags;
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ if (((unsigned long)skb->data) & 0x3) {
+ skb = tx_skb_align_workaround(dev, skb);
+ if (!skb) {
+ /*
+ * We have lost packet due to memory allocation error
+ * in tx_skb_align_workaround(). Hopefully original
+ * skb is still valid, so try transmit it later.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ }
+#endif
spin_lock_irqsave(&fep->tx_lock, flags);
/*
@@ -610,8 +650,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since the tx queue should be stopped.
*/
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s tx queue full!.\n", dev->name);
+ dev_warn(fep->dev, "tx queue full!.\n");
return NETDEV_TX_BUSY;
}
@@ -788,8 +827,7 @@ static int fs_enet_open(struct net_device *dev)
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
"fs_enet-mac", dev);
if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate FS_ENET IRQ!", dev->name);
+ dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
if (fep->fpi->use_napi)
napi_disable(&fep->napi);
return -EINVAL;
@@ -1053,7 +1091,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
if (ret)
goto out_free_bd;
- printk(KERN_INFO "%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
+ pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
return 0;
@@ -1103,11 +1141,18 @@ static struct of_device_id fs_enet_match[] = {
},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ {
+ .compatible = "fsl,mpc5121-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+#else
{
.compatible = "fsl,pq1-fec-enet",
.data = (void *)&fs_fec_ops,
},
#endif
+#endif
{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index ef01e09781a5..1ece4b1a689e 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -13,9 +13,56 @@
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
+#endif
+
+#if defined(CONFIG_FS_ENET_HAS_FEC)
+#include <asm/cpm.h>
+
+#if defined(CONFIG_FS_ENET_MPC5121_FEC)
+/* MPC5121 FEC has different register layout */
+struct fec {
+ u32 fec_reserved0;
+ u32 fec_ievent; /* Interrupt event reg */
+ u32 fec_imask; /* Interrupt mask reg */
+ u32 fec_reserved1;
+ u32 fec_r_des_active; /* Receive descriptor reg */
+ u32 fec_x_des_active; /* Transmit descriptor reg */
+ u32 fec_reserved2[3];
+ u32 fec_ecntrl; /* Ethernet control reg */
+ u32 fec_reserved3[6];
+ u32 fec_mii_data; /* MII manage frame reg */
+ u32 fec_mii_speed; /* MII speed control reg */
+ u32 fec_reserved4[7];
+ u32 fec_mib_ctrlstat; /* MIB control/status reg */
+ u32 fec_reserved5[7];
+ u32 fec_r_cntrl; /* Receive control reg */
+ u32 fec_reserved6[15];
+ u32 fec_x_cntrl; /* Transmit Control reg */
+ u32 fec_reserved7[7];
+ u32 fec_addr_low; /* Low 32bits MAC address */
+ u32 fec_addr_high; /* High 16bits MAC address */
+ u32 fec_opd; /* Opcode + Pause duration */
+ u32 fec_reserved8[10];
+ u32 fec_hash_table_high; /* High 32bits hash table */
+ u32 fec_hash_table_low; /* Low 32bits hash table */
+ u32 fec_grp_hash_table_high; /* High 32bits hash table */
+ u32 fec_grp_hash_table_low; /* Low 32bits hash table */
+ u32 fec_reserved9[7];
+ u32 fec_x_wmrk; /* FIFO transmit water mark */
+ u32 fec_reserved10;
+ u32 fec_r_bound; /* FIFO receive bound reg */
+ u32 fec_r_fstart; /* FIFO receive start reg */
+ u32 fec_reserved11[11];
+ u32 fec_r_des_start; /* Receive descriptor ring */
+ u32 fec_x_des_start; /* Transmit descriptor ring */
+ u32 fec_r_buff_size; /* Maximum receive buff size */
+ u32 fec_reserved12[26];
+ u32 fec_dma_control; /* DMA Endian and other ctrl */
+};
+#endif
struct fec_info {
- fec_t __iomem *fecp;
+ struct fec __iomem *fecp;
u32 mii_speed;
};
#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 22e5a847a588..0a973e71876b 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -19,7 +19,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -34,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
+#include <linux/gfp.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
@@ -218,7 +218,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
@@ -235,7 +235,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -476,8 +476,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ca7bcb8ab3a1..ec81f50d5919 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -19,7 +19,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -33,6 +32,7 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/gfp.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -80,7 +80,7 @@
*/
#define FEC_RESET_DELAY 50
-static int whack_reset(fec_t __iomem *fecp)
+static int whack_reset(struct fec __iomem *fecp)
{
int i;
@@ -168,7 +168,7 @@ static void cleanup_data(struct net_device *dev)
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}
@@ -216,11 +216,11 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac)
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
@@ -236,7 +236,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -246,7 +246,7 @@ static void set_multicast_list(struct net_device *dev)
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
const struct fs_platform_info *fpi = fep->fpi;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
int r;
@@ -257,8 +257,7 @@ static void restart(struct net_device *dev)
r = whack_reset(fep->fec.fecp);
if (r != 0)
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s FEC Reset FAILED!\n", dev->name);
+ dev_err(fep->dev, "FEC Reset FAILED!\n");
/*
* Set station address.
*/
@@ -281,7 +280,11 @@ static void restart(struct net_device *dev)
* Set maximum receive buffer size.
*/
FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
+#else
FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+#endif
/* get physical address */
rx_bd_base_phys = fep->ring_mem_addr;
@@ -298,7 +301,11 @@ static void restart(struct net_device *dev)
/*
* Enable big endian and don't care about SDMA FC.
*/
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FS(fecp, dma_control, 0xC0000000);
+#else
FW(fecp, fun_code, 0x78000000);
+#endif
/*
* Set MII speed.
@@ -309,9 +316,17 @@ static void restart(struct net_device *dev)
* Clear any outstanding interrupt.
*/
FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_FS_ENET_MPC5121_FEC
FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+#else
+ /*
+ * Only set MII mode - do not touch maximum frame length
+ * configured before.
+ */
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);
+#endif
/*
* adjust to duplex mode
*/
@@ -340,7 +355,7 @@ static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
struct fec_info* feci= fep->phydev->bus->priv;
@@ -355,9 +370,7 @@ static void stop(struct net_device *dev)
udelay(1);
if (i == FEC_RESET_DELAY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FEC timeout on graceful transmit stop\n",
- dev->name);
+ dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
/*
* Disable FEC. Let only MII interrupts.
*/
@@ -378,7 +391,7 @@ static void stop(struct net_device *dev)
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
}
@@ -386,7 +399,7 @@ static void napi_clear_rx_event(struct net_device *dev)
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
@@ -394,7 +407,7 @@ static void napi_enable_rx(struct net_device *dev)
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
@@ -402,7 +415,7 @@ static void napi_disable_rx(struct net_device *dev)
static void rx_bd_done(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, r_des_active, 0x01000000);
}
@@ -410,7 +423,7 @@ static void rx_bd_done(struct net_device *dev)
static void tx_kickstart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, x_des_active, 0x01000000);
}
@@ -418,7 +431,7 @@ static void tx_kickstart(struct net_device *dev)
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
return FR(fecp, ievent) & FR(fecp, imask);
}
@@ -426,32 +439,33 @@ static u32 get_int_events(struct net_device *dev)
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, int_events);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
- if (*sizep < sizeof(fec_t))
+ if (*sizep < sizeof(struct fec))
return -EINVAL;
- memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
+ memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
return 0;
}
static int get_regs_len(struct net_device *dev)
{
- return sizeof(fec_t);
+ return sizeof(struct fec);
}
static void tx_restart(struct net_device *dev)
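Besides renaming the fec_t typedef to struct fec and adding the MPC5121-specific register writes, the mac-fec.c hunks convert the old printk(KERN_* DRV_MODULE_NAME ": %s ...", dev->name) messages to dev_warn()/dev_err() on the struct device kept in the driver's private data. A minimal sketch of that logging pattern, assuming only a private structure that stores the backing struct device pointer; the example_* names are illustrative:

#include <linux/device.h>
#include <linux/netdevice.h>

struct example_priv {
	struct device *dev;	/* backing device, saved at probe time */
};

static void example_report_error(struct net_device *ndev, u32 events)
{
	struct example_priv *priv = netdev_priv(ndev);

	/* dev_warn() already prefixes driver and device names, so the
	 * DRV_MODULE_NAME / dev->name boilerplate is no longer needed. */
	dev_warn(priv->dev, "ERROR(s) 0x%x\n", events);
}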
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 008cdd9cc536..34d3da751eb4 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -19,7 +19,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -213,7 +212,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
@@ -228,7 +227,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -367,9 +366,7 @@ static void stop(struct net_device *dev)
udelay(1);
if (i == SCC_RESET_DELAY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s SCC timeout on graceful transmit stop\n",
- dev->name);
+ dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
W16(sccp, scc_sccm, 0);
C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -429,8 +426,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 96eba4280c5c..5944b65082cb 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -52,7 +52,7 @@
static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
{
struct fec_info* fec = bus->priv;
- fec_t __iomem *fecp = fec->fecp;
+ struct fec __iomem *fecp = fec->fecp;
int i, ret = -1;
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
@@ -75,7 +75,7 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
struct fec_info* fec = bus->priv;
- fec_t __iomem *fecp = fec->fecp;
+ struct fec __iomem *fecp = fec->fecp;
int i;
/* this must never happen */
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 25fabb3eedc5..3acac5f930c8 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -46,6 +46,11 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mdio __iomem *regs;
+};
+
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus attached to the local interface, which may be different from the
@@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
- return (void __iomem __force *)bus->priv;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ return priv->regs;
}
/*
@@ -198,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
struct gfar __iomem *enet_regs;
- u32 __iomem *ioremap_tbipa;
- u64 addr, size;
/*
* This is mildly evil, but so is our hardware for doing this.
@@ -213,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
return &enet_regs->tbipa;
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
- addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
- ioremap_tbipa = ioremap(addr, size);
- return ioremap_tbipa;
+ return of_iomap(np, 1);
} else
return NULL;
}
@@ -266,31 +269,50 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
{
struct device_node *np = ofdev->node;
struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
void __iomem *map;
u32 __iomem *tbipa;
struct mii_bus *new_bus;
int tbiaddr = -1;
+ const u32 *addrp;
u64 addr = 0, size = 0;
int err = 0;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
new_bus = mdiobus_alloc();
if (NULL == new_bus)
- return -ENOMEM;
+ goto err_free_priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
new_bus->write = &fsl_pq_mdio_write,
new_bus->reset = &fsl_pq_mdio_reset,
+ new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
+ addrp = of_get_address(np, 0, &size, NULL);
+ if (!addrp) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
/* Set the PHY base address */
- addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+ addr = of_translate_address(np, addrp);
+ if (addr == OF_BAD_ADDR) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
map = ioremap(addr, size);
if (!map) {
err = -ENOMEM;
goto err_free_bus;
}
+ priv->map = map;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
@@ -298,8 +320,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy"))
map -= offsetof(struct fsl_pq_mdio, miimcfg);
regs = map;
-
- new_bus->priv = (void __force *)regs;
+ priv->regs = regs;
new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -392,10 +413,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err_free_irqs:
kfree(new_bus->irq);
err_unmap_regs:
- iounmap(regs);
+ iounmap(priv->map);
err_free_bus:
kfree(new_bus);
-
+err_free_priv:
+ kfree(priv);
return err;
}
@@ -404,14 +426,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
dev_set_drvdata(device, NULL);
- iounmap(fsl_pq_mdio_get_regs(bus));
+ iounmap(priv->map);
bus->priv = NULL;
mdiobus_free(bus);
+ kfree(priv);
return 0;
}
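The fsl_pq_mdio.c change above fixes an unmap problem by introducing a small private structure: the raw cookie returned by ioremap() is kept separately from the register pointer, which may have been offset backwards by offsetof(struct fsl_pq_mdio, miimcfg), so the error and remove paths always hand the original mapping to iounmap(). It also replaces the open-coded of_translate_address() plus ioremap() pair with of_iomap(). A minimal sketch of the keep-the-original-cookie idea, with illustrative example_* names:

#include <linux/io.h>
#include <linux/phy.h>
#include <linux/slab.h>

struct example_mdio_priv {
	void __iomem *map;	/* exactly what ioremap() returned */
	void __iomem *regs;	/* possibly offset into that mapping */
};

static void example_mdio_teardown(struct mii_bus *bus)
{
	struct example_mdio_priv *priv = bus->priv;

	/* Unmap the original cookie, never the adjusted register pointer. */
	iounmap(priv->map);
	bus->priv = NULL;
	kfree(priv);
}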
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e0620d084644..4e97ca182997 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_select_queue = gfar_select_queue,
.ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
return priv->vlgrp || priv->rx_csum_enable;
}
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
@@ -555,12 +549,8 @@ static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
- u64 addr, size;
-
- addr = of_translate_address(np,
- of_get_address(np, 0, &size, NULL));
- priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+ priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
if (!priv->gfargrp[priv->num_grps].regs)
return -ENOMEM;
@@ -682,7 +672,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
priv->rx_queue[i] = NULL;
for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
+ priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
if (!priv->tx_queue[i]) {
err = -ENOMEM;
@@ -695,7 +685,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
}
for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+ priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
if (!priv->rx_queue[i]) {
err = -ENOMEM;
@@ -1004,7 +994,7 @@ static int gfar_probe(struct of_device *ofdev,
}
/* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_bit parses from right to left, which
+ * but, for_each_set_bit parses from right to left, which
* basically reverses the queue numbers */
for (i = 0; i< priv->num_grps; i++) {
priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -1017,7 +1007,7 @@ static int gfar_probe(struct of_device *ofdev,
* also assign queues to groups */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
- for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1025,7 +1015,7 @@ static int gfar_probe(struct of_device *ofdev,
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
- for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1126,10 +1116,10 @@ static int gfar_probe(struct of_device *ofdev,
/* provided which set of benchmarks. */
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
for (i = 0; i < priv->num_rx_queues; i++)
- printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+ printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
dev->name, i, priv->rx_queue[i]->rx_ring_size);
for(i = 0; i < priv->num_tx_queues; i++)
- printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+ printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
dev->name, i, priv->tx_queue[i]->tx_ring_size);
return 0;
@@ -1521,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
- while (!(gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)))
- cpu_relax();
+ spin_event_timeout(((gfar_read(&regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
@@ -1644,13 +1634,13 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- if(!tx_queue->tx_skbuff)
+ if(tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(!rx_queue->rx_skbuff)
+ if(rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
@@ -1715,7 +1705,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
- for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
if (likely(priv->tx_queue[i]->txcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1723,7 +1713,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
}
baddr = &regs->rxic0;
- for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
if (likely(priv->rx_queue[i]->rxcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -2027,7 +2017,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* setup the TxBD length and buffer pointer for the first BD */
- tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
@@ -2059,6 +2048,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbdp_start->lstatus = lstatus;
+ eieio(); /* force lstatus write before tx_skbuff */
+
+ tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
/* Update the current skb pointer to the next entry we will use
* (wrapping if necessary) */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
@@ -2396,6 +2389,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, alignamount);
+ GFAR_CB(skb)->alignamount = alignamount;
return skb;
}
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
- skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
- if (amount_pull)
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
+ }
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2535,13 +2530,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
newskb = skb;
else if (skb) {
/*
- * We need to reset ->data to what it
+ * We need to un-reserve() the skb to what it
* was before gfar_new_skb() re-aligned
* it to an RXBUF_ALIGNMENT boundary
* before we put the skb back on the
* recycle list.
*/
- skb->data = skb->head + NET_SKB_PAD;
+ skb_reserve(skb, -GFAR_CB(skb)->alignamount);
__skb_queue_head(&priv->rx_recycle, skb);
}
} else {
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
-
+ skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {
@@ -2612,7 +2607,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
- for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
if (test_bit(i, &serviced_queues))
continue;
rx_queue = priv->rx_queue[i];
@@ -2868,11 +2863,11 @@ static void gfar_set_multi(struct net_device *dev)
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
- for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, dev) {
if (idx < em_num) {
gfar_set_mac_for_addr(dev, idx,
mc_ptr->dmi_addr);
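One of the subtler gianfar.c changes is in the transmit path: the skb pointer is stored in tx_skbuff only after the descriptor's lstatus word has been written, with an eieio() barrier in between, so the cleanup path can never find a published skb whose descriptor write is still pending. A minimal sketch of that producer-side ordering, using the generic wmb() (provided by the architecture barrier headers) where gianfar uses the PowerPC eieio(); the names are illustrative:

#include <linux/skbuff.h>

static void example_publish_tx(u32 *desc_lstatus, u32 lstatus,
			       struct sk_buff **ring_slot, struct sk_buff *skb)
{
	*desc_lstatus = lstatus;	/* hand the buffer to the descriptor */
	wmb();				/* gianfar uses eieio() here on powerpc */
	*ring_slot = skb;		/* only now may cleanup free this slot */
}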
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc43dca5..17d25e714236 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@ struct rxfcb {
u16 vlctl; /* VLAN control word */
};
+struct gianfar_skb_cb {
+ int alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
struct rmon_mib
{
u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
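The new GFAR_CB() macro above uses the 48-byte skb->cb scratch area, which a driver owns while the skb is inside it, to remember how much headroom gfar_new_skb() reserved; gianfar.c can then undo exactly that amount with a negative skb_reserve() when putting the buffer on its recycle list, instead of poking skb->data directly. A minimal sketch of the control-buffer pattern with illustrative example_* names:

#include <linux/skbuff.h>

struct example_skb_cb {
	int alignamount;	/* headroom added when the skb was prepared */
};

#define EXAMPLE_CB(skb) ((struct example_skb_cb *)((skb)->cb))

static void example_prepare(struct sk_buff *skb, int align)
{
	skb_reserve(skb, align);		/* align the payload */
	EXAMPLE_CB(skb)->alignamount = align;	/* remember for recycling */
}

static void example_recycle(struct sk_buff *skb)
{
	/* A negative reserve moves data/tail back to the pre-aligned state. */
	skb_reserve(skb, -EXAMPLE_CB(skb)->alignamount);
}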
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 1010367695e4..9bda023c0235 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index b98c6c512299..64f4094ac7f1 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
new file mode 100644
index 000000000000..3a90430de918
--- /dev/null
+++ b/drivers/net/greth.c
@@ -0,0 +1,1635 @@
+/*
+ * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
+ *
+ * 2005-2009 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
+ * available in the GRLIB VHDL IP core library.
+ *
+ * Full documentation of both cores can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * The Gigabit version supports scatter/gather DMA, any alignment of
+ * buffers and checksum offloading.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Kristoffer Glembo
+ * Daniel Hellstrom
+ * Marko Isomaki
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#endif
+
+#include "greth.h"
+
+#define GRETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
+module_param(greth_debug, int, 0);
+MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
+
+/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+static int macaddr[6];
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
+
+static int greth_edcl = 1;
+module_param(greth_edcl, int, 0);
+MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
+
+static int greth_open(struct net_device *dev);
+static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
+ struct net_device *dev);
+static int greth_rx(struct net_device *dev, int limit);
+static int greth_rx_gbit(struct net_device *dev, int limit);
+static void greth_clean_tx(struct net_device *dev);
+static void greth_clean_tx_gbit(struct net_device *dev);
+static irqreturn_t greth_interrupt(int irq, void *dev_id);
+static int greth_close(struct net_device *dev);
+static int greth_set_mac_add(struct net_device *dev, void *p);
+static void greth_set_multicast_list(struct net_device *dev);
+
+#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
+#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
+
+#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
+#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
+#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
+
+static void greth_print_rx_packet(void *addr, int len)
+{
+ print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ addr, len, true);
+}
+
+static void greth_print_tx_packet(struct sk_buff *skb)
+{
+ int i;
+ int length;
+
+ if (skb_shinfo(skb)->nr_frags == 0)
+ length = skb->len;
+ else
+ length = skb_headlen(skb);
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, length, true);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ length, true);
+ }
+}
+
+static inline void greth_enable_tx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
+}
+
+static inline void greth_disable_tx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
+}
+
+static inline void greth_enable_rx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
+}
+
+static inline void greth_disable_rx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
+}
+
+static inline void greth_enable_irqs(struct greth_private *greth)
+{
+ GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
+}
+
+static inline void greth_disable_irqs(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
+}
+
+static inline void greth_write_bd(u32 *bd, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), bd);
+}
+
+static inline u32 greth_read_bd(u32 *bd)
+{
+ return be32_to_cpu(__raw_readl(bd));
+}
+
+static void greth_clean_rings(struct greth_private *greth)
+{
+ int i;
+ struct greth_bd *rx_bdp = greth->rx_bd_base;
+ struct greth_bd *tx_bdp = greth->tx_bd_base;
+
+ if (greth->gbit_mac) {
+
+ /* Free and unmap RX buffers */
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ if (greth->rx_skbuff[i] != NULL) {
+ dev_kfree_skb(greth->rx_skbuff[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* TX buffers */
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+
+ } else { /* 10/100 Mbps MAC */
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ kfree(greth->rx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
+ kfree(greth->tx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int greth_init_rings(struct greth_private *greth)
+{
+ struct sk_buff *skb;
+ struct greth_bd *rx_bd, *tx_bd;
+ u32 dma_addr;
+ int i;
+
+ rx_bd = greth->rx_bd_base;
+ tx_bd = greth->tx_bd_base;
+
+ /* Initialize descriptor rings and buffers */
+ if (greth->gbit_mac) {
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+ skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
+ if (skb == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(greth->dev,
+ skb->data,
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth->rx_skbuff[i] = skb;
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+
+ } else {
+
+ /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+
+ greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->rx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->rx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++) {
+
+ greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->tx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->tx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&tx_bd[i].addr, dma_addr);
+ greth_write_bd(&tx_bd[i].stat, 0);
+ }
+ }
+ greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
+ greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
+
+ /* Initialize pointers. */
+ greth->rx_cur = 0;
+ greth->tx_next = 0;
+ greth->tx_last = 0;
+ greth->tx_free = GRETH_TXBD_NUM;
+
+ /* Initialize descriptor base address */
+ GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
+ GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
+
+ return 0;
+
+cleanup:
+ greth_clean_rings(greth);
+ return -ENOMEM;
+}
+
+static int greth_open(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ int err;
+
+ err = greth_init_rings(greth);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
+ return err;
+ }
+
+ err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
+ greth_clean_rings(greth);
+ return err;
+ }
+
+ if (netif_msg_ifup(greth))
+ dev_dbg(&dev->dev, " starting queue\n");
+ netif_start_queue(dev);
+
+ napi_enable(&greth->napi);
+
+ greth_enable_irqs(greth);
+ greth_enable_tx(greth);
+ greth_enable_rx(greth);
+ return 0;
+
+}
+
+static int greth_close(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ napi_disable(&greth->napi);
+
+ greth_disable_tx(greth);
+
+ netif_stop_queue(dev);
+
+ free_irq(greth->irq, (void *) dev);
+
+ greth_clean_rings(greth);
+
+ return 0;
+}
+
+static netdev_tx_t
+greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ int err = NETDEV_TX_OK;
+ u32 status, dma_addr;
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+
+ if (unlikely(greth->tx_free <= 0)) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ dma_addr = greth_read_bd(&bdp->addr);
+
+ memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+ status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+
+ /* Wrap around descriptor ring */
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+
+ /* No more descriptors */
+ if (unlikely(greth->tx_free == 0)) {
+
+ /* Free transmitted descriptors */
+ greth_clean_tx(dev);
+
+ /* If nothing was cleaned, stop queue & wait for irq */
+ if (unlikely(greth->tx_free == 0)) {
+ status |= GRETH_BD_IE;
+ netif_stop_queue(dev);
+ }
+ }
+
+ /* Write descriptor control word and enable transmission */
+ greth_write_bd(&bdp->stat, status);
+ greth_enable_tx(greth);
+
+out:
+ dev_kfree_skb(skb);
+ return err;
+}
+
+
+static netdev_tx_t
+greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ u32 status = 0, dma_addr;
+ int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (greth->tx_free < nr_frags + 1) {
+ netif_stop_queue(dev);
+ err = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ /* Save skb pointer. */
+ greth->tx_skbuff[greth->tx_next] = skb;
+
+ /* Linear buf */
+ if (nr_frags != 0)
+ status = GRETH_TXBD_MORE;
+
+ status |= GRETH_TXBD_CSALL;
+ status |= skb_headlen(skb) & GRETH_BD_LEN;
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, status);
+ dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(greth->tx_next);
+
+ /* Frags */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ greth->tx_skbuff[curr_tx] = NULL;
+ bdp = greth->tx_bd_base + curr_tx;
+
+ status = GRETH_TXBD_CSALL;
+ status |= frag->size & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (curr_tx == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+ /* More fragments left */
+ if (i < nr_frags - 1)
+ status |= GRETH_TXBD_MORE;
+
+ /* ... last fragment, check if out of descriptors */
+ else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
+
+ /* Enable interrupts and stop queue */
+ status |= GRETH_BD_IE;
+ netif_stop_queue(dev);
+ }
+
+ greth_write_bd(&bdp->stat, status);
+
+ dma_addr = dma_map_page(greth->dev,
+ frag->page,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto frag_map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(curr_tx);
+ }
+
+ wmb();
+
+ /* Enable the descriptors that we configured ... */
+ for (i = 0; i < nr_frags + 1; i++) {
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+ }
+
+ greth_enable_tx(greth);
+
+ return NETDEV_TX_OK;
+
+frag_map_error:
+ /* Unmap SKB mappings that succeeded */
+ for (i = 0; greth->tx_next + i != curr_tx; i++) {
+ bdp = greth->tx_bd_base + greth->tx_next + i;
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
+ DMA_TO_DEVICE);
+ }
+map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+ dev_kfree_skb(skb);
+out:
+ return err;
+}
+
+
+static irqreturn_t greth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct greth_private *greth;
+ u32 status;
+ irqreturn_t retval = IRQ_NONE;
+
+ greth = netdev_priv(dev);
+
+ spin_lock(&greth->devlock);
+
+ /* Get the interrupt events that caused us to be here. */
+ status = GRETH_REGLOAD(greth->regs->status);
+
+ /* Handle rx and tx interrupts through poll */
+ if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
+
+ /* Clear interrupt status */
+ GRETH_REGORIN(greth->regs->status,
+ status & (GRETH_INT_RX | GRETH_INT_TX));
+
+ retval = IRQ_HANDLED;
+
+ /* Disable interrupts and schedule poll() */
+ greth_disable_irqs(greth);
+ napi_schedule(&greth->napi);
+ }
+
+ mmiowb();
+ spin_unlock(&greth->devlock);
+
+ return retval;
+}
+
+static void greth_clean_tx(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ u32 stat;
+
+ greth = netdev_priv(dev);
+
+ while (1) {
+ bdp = greth->tx_bd_base + greth->tx_last;
+ stat = greth_read_bd(&bdp->stat);
+
+ if (unlikely(stat & GRETH_BD_EN))
+ break;
+
+ if (greth->tx_free == GRETH_TXBD_NUM)
+ break;
+
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ }
+ dev->stats.tx_packets++;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ greth->tx_free++;
+ }
+
+ if (greth->tx_free > 0) {
+ netif_wake_queue(dev);
+ }
+
+}
+
+static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
+{
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ if (stat & GRETH_TXBD_ERR_LC)
+ dev->stats.tx_aborted_errors++;
+ }
+ dev->stats.tx_packets++;
+}
+
+static void greth_clean_tx_gbit(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp, *bdp_last_frag;
+ struct sk_buff *skb;
+ u32 stat;
+ int nr_frags, i;
+
+ greth = netdev_priv(dev);
+
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ skb = greth->tx_skbuff[greth->tx_last];
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* We only clean fully completed SKBs */
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+ stat = bdp_last_frag->stat;
+
+ if (stat & GRETH_BD_EN)
+ break;
+
+ greth->tx_skbuff[greth->tx_last] = NULL;
+
+ greth_update_tx_stats(dev, stat);
+
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+ if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
+ netif_wake_queue(dev);
+ }
+}
+
+static int greth_pending_packets(struct greth_private *greth)
+{
+ struct greth_bd *bdp;
+ u32 status;
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ status = greth_read_bd(&bdp->stat);
+ if (status & GRETH_BD_EN)
+ return 0;
+ else
+ return 1;
+}
+
+static int greth_rx(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb;
+ int pkt_len;
+ int bad, count;
+ u32 status, dma_addr;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ status = greth_read_bd(&bdp->stat);
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
+ if (unlikely(status & GRETH_BD_EN)) {
+ break;
+ }
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ }
+ if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ }
+ if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+ if (unlikely(bad)) {
+ dev->stats.rx_errors++;
+
+ } else {
+
+ pkt_len = status & GRETH_BD_LEN;
+
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (unlikely(skb == NULL)) {
+
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
+
+ dev->stats.rx_dropped++;
+
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = dev;
+
+ dma_sync_single_for_cpu(greth->dev,
+ dma_addr,
+ pkt_len,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
+
+ memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+ }
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ greth_enable_rx(greth);
+
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+}
+
+static inline int hw_checksummed(u32 status)
+{
+
+ if (status & GRETH_RXBD_IP_FRAG)
+ return 0;
+
+ if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
+ return 0;
+
+ return 1;
+}
+
+static int greth_rx_gbit(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb, *newskb;
+ int pkt_len;
+ int bad, count = 0;
+ u32 status, dma_addr;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ skb = greth->rx_skbuff[greth->rx_cur];
+ status = greth_read_bd(&bdp->stat);
+ bad = 0;
+
+ if (status & GRETH_BD_EN)
+ break;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ } else if (status &
+ (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ } else if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+
+ /* Allocate new skb to replace current */
+ newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
+
+ if (!bad && newskb) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(greth->dev,
+ newskb->data,
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (!dma_mapping_error(greth->dev, dma_addr)) {
+ /* Process the incoming frame. */
+ pkt_len = status & GRETH_BD_LEN;
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
+
+ skb_put(skb, pkt_len);
+
+ if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+
+ greth->rx_skbuff[greth->rx_cur] = newskb;
+ greth_write_bd(&bdp->addr, dma_addr);
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
+ dev_kfree_skb(newskb);
+ dev->stats.rx_dropped++;
+ }
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ dev->stats.rx_dropped++;
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+ greth_enable_rx(greth);
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+
+}
+
+static int greth_poll(struct napi_struct *napi, int budget)
+{
+ struct greth_private *greth;
+ int work_done = 0;
+ greth = container_of(napi, struct greth_private, napi);
+
+ if (greth->gbit_mac) {
+ greth_clean_tx_gbit(greth->netdev);
+ } else {
+ greth_clean_tx(greth->netdev);
+ }
+
+restart_poll:
+ if (greth->gbit_mac) {
+ work_done += greth_rx_gbit(greth->netdev, budget - work_done);
+ } else {
+ work_done += greth_rx(greth->netdev, budget - work_done);
+ }
+
+ if (work_done < budget) {
+
+ napi_complete(napi);
+
+ if (greth_pending_packets(greth)) {
+ napi_reschedule(napi);
+ goto restart_poll;
+ }
+ }
+
+ greth_enable_irqs(greth);
+ return work_done;
+}
+
+static int greth_set_mac_add(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ greth = netdev_priv(dev);
+ regs = (struct greth_regs *) greth->regs;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
+ GRETH_REGSAVE(regs->esa_lsb,
+ addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
+ addr->sa_data[4] << 8 | addr->sa_data[5]);
+ return 0;
+}
+
+static u32 greth_hash_get_index(__u8 *addr)
+{
+ return (ether_crc(6, addr)) & 0x3F;
+}
+
+static void greth_set_hash_filter(struct net_device *dev)
+{
+ struct dev_mc_list *curr;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ u32 mc_filter[2];
+ unsigned int bitnr;
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(curr, dev) {
+ bitnr = greth_hash_get_index(curr->dmi_addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
+ GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
+}
+
+static void greth_set_multicast_list(struct net_device *dev)
+{
+ int cfg;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+
+ cfg = GRETH_REGLOAD(regs->control);
+ if (dev->flags & IFF_PROMISC)
+ cfg |= GRETH_CTRL_PR;
+ else
+ cfg &= ~GRETH_CTRL_PR;
+
+ if (greth->multicast) {
+ if (dev->flags & IFF_ALLMULTI) {
+ GRETH_REGSAVE(regs->hash_msb, -1);
+ GRETH_REGSAVE(regs->hash_lsb, -1);
+ cfg |= GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ if (netdev_mc_empty(dev)) {
+ cfg &= ~GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ /* Setup multicast filter */
+ greth_set_hash_filter(dev);
+ cfg |= GRETH_CTRL_MCEN;
+ }
+ GRETH_REGSAVE(regs->control, cfg);
+}
+
+static u32 greth_get_msglevel(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return greth->msg_enable;
+}
+
+static void greth_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ greth->msg_enable = value;
+}
+static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phy, cmd);
+}
+
+static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phy, cmd);
+}
+
+static int greth_get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct greth_regs);
+}
+
+static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ strncpy(info->driver, dev_driver_string(greth->dev), 32);
+ strncpy(info->version, "revision: 1.0", 32);
+ strncpy(info->bus_info, greth->dev->bus->name, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ info->eedump_len = 0;
+ info->regdump_len = sizeof(struct greth_regs);
+}
+
+static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct greth_private *greth = netdev_priv(dev);
+ u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
+ buff[i] = greth_read_bd(&greth_regs[i]);
+}
+
+static u32 greth_get_rx_csum(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
+}
+
+static int greth_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ spin_lock_bh(&greth->devlock);
+
+ if (data)
+ greth->flags |= GRETH_FLAG_RX_CSUM;
+ else
+ greth->flags &= ~GRETH_FLAG_RX_CSUM;
+
+ spin_unlock_bh(&greth->devlock);
+
+ return 0;
+}
+
+static u32 greth_get_tx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int greth_set_tx_csum(struct net_device *dev, u32 data)
+{
+ netif_tx_lock_bh(dev);
+ ethtool_op_set_tx_csum(dev, data);
+ netif_tx_unlock_bh(dev);
+ return 0;
+}
+
+static const struct ethtool_ops greth_ethtool_ops = {
+ .get_msglevel = greth_get_msglevel,
+ .set_msglevel = greth_set_msglevel,
+ .get_settings = greth_get_settings,
+ .set_settings = greth_set_settings,
+ .get_drvinfo = greth_get_drvinfo,
+ .get_regs_len = greth_get_regs_len,
+ .get_regs = greth_get_regs,
+ .get_rx_csum = greth_get_rx_csum,
+ .set_rx_csum = greth_set_rx_csum,
+ .get_tx_csum = greth_get_tx_csum,
+ .set_tx_csum = greth_set_tx_csum,
+ .get_link = ethtool_op_get_link,
+};
+
+static struct net_device_ops greth_netdev_ops = {
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static inline int wait_for_mdio(struct greth_private *greth)
+{
+ unsigned long timeout = jiffies + 4*HZ/100;
+ while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
+ if (time_after(jiffies, timeout))
+ return 0;
+ }
+ return 1;
+}
+
+static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct greth_private *greth = bus->priv;
+ int data;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
+ data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
+ return data;
+
+ } else {
+ return -1;
+ }
+}
+
+static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct greth_private *greth = bus->priv;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio,
+ ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int greth_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void greth_link_change(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phydev = greth->phy;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ if (phydev->link) {
+
+ if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
+
+ GRETH_REGANDIN(greth->regs->control,
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+
+ if (phydev->duplex)
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
+
+ if (phydev->speed == SPEED_100) {
+
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
+ }
+
+ else if (phydev->speed == SPEED_1000)
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+
+ greth->speed = phydev->speed;
+ greth->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != greth->link) {
+ if (!phydev->link) {
+ greth->speed = 0;
+ greth->duplex = -1;
+ }
+ greth->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ pr_debug("%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_debug("%s: link down\n", dev->name);
+ }
+}
+
+static int greth_mdio_probe(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = NULL;
+ int ret;
+
+ /* Find the first PHY */
+ phy = phy_find_first(greth->mdio);
+
+ if (!phy) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ ret = phy_connect_direct(dev, phy, &greth_link_change,
+ 0, greth->gbit_mac ?
+ PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return ret;
+ }
+
+ if (greth->gbit_mac)
+ phy->supported &= PHY_GBIT_FEATURES;
+ else
+ phy->supported &= PHY_BASIC_FEATURES;
+
+ phy->advertising = phy->supported;
+
+ greth->link = 0;
+ greth->speed = 0;
+ greth->duplex = -1;
+ greth->phy = phy;
+
+ return 0;
+}
+
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+ int retval;
+
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+static int greth_mdio_init(struct greth_private *greth)
+{
+ int ret, phy;
+ unsigned long timeout;
+
+ greth->mdio = mdiobus_alloc();
+ if (!greth->mdio) {
+ return -ENOMEM;
+ }
+
+ greth->mdio->name = "greth-mdio";
+ snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
+ greth->mdio->read = greth_mdio_read;
+ greth->mdio->write = greth_mdio_write;
+ greth->mdio->reset = greth_mdio_reset;
+ greth->mdio->priv = greth;
+
+ greth->mdio->irq = greth->mdio_irqs;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ greth->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(greth->mdio);
+ if (ret) {
+ goto error;
+ }
+
+ ret = greth_mdio_probe(greth->netdev);
+ if (ret) {
+ if (netif_msg_probe(greth))
+ dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
+ goto unreg_mdio;
+ }
+
+ phy_start(greth->phy);
+
+ /* If Ethernet debug link is used make autoneg happen right away */
+ if (greth->edcl && greth_edcl == 1) {
+ phy_start_aneg(greth->phy);
+ timeout = jiffies + 6*HZ;
+ while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ }
+ genphy_read_status(greth->phy);
+ greth_link_change(greth->netdev);
+ }
+
+ return 0;
+
+unreg_mdio:
+ mdiobus_unregister(greth->mdio);
+error:
+ mdiobus_free(greth->mdio);
+ return ret;
+}
+
+/* Initialize the GRETH MAC */
+static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+ struct net_device *dev;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ int i;
+ int err;
+ int tmp;
+ unsigned long timeout;
+
+ dev = alloc_etherdev(sizeof(struct greth_private));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ greth = netdev_priv(dev);
+ greth->netdev = dev;
+ greth->dev = &ofdev->dev;
+
+ if (greth_debug > 0)
+ greth->msg_enable = greth_debug;
+ else
+ greth->msg_enable = GRETH_DEF_MSG_ENABLE;
+
+ spin_lock_init(&greth->devlock);
+
+ greth->regs = of_ioremap(&ofdev->resource[0], 0,
+ resource_size(&ofdev->resource[0]),
+ "grlib-greth regs");
+
+ if (greth->regs == NULL) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "ioremap failure.\n");
+ err = -EIO;
+ goto error1;
+ }
+
+ regs = (struct greth_regs *) greth->regs;
+ greth->irq = ofdev->irqs[0];
+
+ dev_set_drvdata(greth->dev, dev);
+ SET_NETDEV_DEV(dev, greth->dev);
+
+ if (netif_msg_probe(greth))
+ dev_dbg(greth->dev, "resetting controller.\n");
+
+ /* Reset the controller. */
+ GRETH_REGSAVE(regs->control, GRETH_RESET);
+
+ /* Wait for MAC to reset itself */
+ timeout = jiffies + HZ/100;
+ while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
+ if (time_after(jiffies, timeout)) {
+ err = -EIO;
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "timeout when waiting for reset.\n");
+ goto error2;
+ }
+ }
+
+ /* Get default PHY address */
+ greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
+
+ /* Check if we have GBIT capable MAC */
+ tmp = GRETH_REGLOAD(regs->control);
+ greth->gbit_mac = (tmp >> 27) & 1;
+
+ /* Check for multicast capability */
+ greth->multicast = (tmp >> 25) & 1;
+
+ greth->edcl = (tmp >> 31) & 1;
+
+ /* If we have EDCL we disable the EDCL speed-duplex FSM so
+ * it doesn't interfere with the software */
+ if (greth->edcl != 0)
+ GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
+
+ /* Check if MAC can handle MDIO interrupts */
+ greth->mdio_int_en = (tmp >> 26) & 1;
+
+ err = greth_mdio_init(greth);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "failed to register MDIO bus\n");
+ goto error2;
+ }
+
+ /* Allocate TX descriptor ring in coherent memory */
+ greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->tx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error3;
+ }
+
+ memset(greth->tx_bd_base, 0, 1024);
+
+ /* Allocate RX descriptor ring in coherent memory */
+ greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->rx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error4;
+ }
+
+ memset(greth->rx_bd_base, 0, 1024);
+
+ /* Get MAC address from: module param, OF property or ID prom */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i == 6) {
+ const unsigned char *addr;
+ int len;
+ addr = of_get_property(ofdev->node, "local-mac-address", &len);
+ if (addr != NULL && len == 6) {
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) addr[i];
+ } else {
+#ifdef CONFIG_SPARC
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
+#endif
+ }
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+
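+ /* Advance the module-wide default MAC so the next probed core gets a unique address */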
+ macaddr[5]++;
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "no valid ethernet address, aborting.\n");
+ err = -EINVAL;
+ goto error5;
+ }
+
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ /* Clear all pending interrupts except PHY irq */
+ GRETH_REGSAVE(regs->status, 0xFF);
+
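+ /* GBIT-capable cores get scatter-gather, TX IP checksum offload and the gigabit xmit handler */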
+ if (greth->gbit_mac) {
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
+ greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
+ greth->flags = GRETH_FLAG_RX_CSUM;
+ }
+
+ if (greth->multicast) {
+ greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+ } else {
+ dev->flags &= ~IFF_MULTICAST;
+ }
+
+ dev->netdev_ops = &greth_netdev_ops;
+ dev->ethtool_ops = &greth_ethtool_ops;
+
+ if (register_netdev(dev)) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "netdevice registration failed.\n");
+ err = -ENOMEM;
+ goto error5;
+ }
+
+ /* setup NAPI */
+ memset(&greth->napi, 0, sizeof(greth->napi));
+ netif_napi_add(dev, &greth->napi, greth_poll, 64);
+
+ return 0;
+
+error5:
+ dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+error4:
+ dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+error3:
+ mdiobus_unregister(greth->mdio);
+error2:
+ of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
+error1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit greth_of_remove(struct of_device *of_dev)
+{
+ struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
+ struct greth_private *greth = netdev_priv(ndev);
+
+ /* Free descriptor areas */
+ dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+
+ dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+
+ dev_set_drvdata(&of_dev->dev, NULL);
+
+ if (greth->phy)
+ phy_stop(greth->phy);
+ mdiobus_unregister(greth->mdio);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+
+ return 0;
+}
+
+static struct of_device_id greth_of_match[] = {
+ {
+ .name = "GAISLER_ETHMAC",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, greth_of_match);
+
+static struct of_platform_driver greth_of_driver = {
+ .name = "grlib-greth",
+ .match_table = greth_of_match,
+ .probe = greth_of_probe,
+ .remove = __devexit_p(greth_of_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "grlib-greth",
+ },
+};
+
+static int __init greth_init(void)
+{
+ return of_register_platform_driver(&greth_of_driver);
+}
+
+static void __exit greth_cleanup(void)
+{
+ of_unregister_platform_driver(&greth_of_driver);
+}
+
+module_init(greth_init);
+module_exit(greth_cleanup);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
new file mode 100644
index 000000000000..973388d6abca
--- /dev/null
+++ b/drivers/net/greth.h
@@ -0,0 +1,143 @@
+#ifndef GRETH_H
+#define GRETH_H
+
+#include <linux/phy.h>
+
+/* Register bits and masks */
+#define GRETH_RESET 0x40
+#define GRETH_MII_BUSY 0x8
+#define GRETH_MII_NVALID 0x10
+
+#define GRETH_CTRL_FD 0x10
+#define GRETH_CTRL_PR 0x20
+#define GRETH_CTRL_SP 0x80
+#define GRETH_CTRL_GB 0x100
+#define GRETH_CTRL_PSTATIEN 0x400
+#define GRETH_CTRL_MCEN 0x800
+#define GRETH_CTRL_DISDUPLEX 0x1000
+#define GRETH_STATUS_PHYSTAT 0x100
+
+#define GRETH_BD_EN 0x800
+#define GRETH_BD_WR 0x1000
+#define GRETH_BD_IE 0x2000
+#define GRETH_BD_LEN 0x7FF
+
+#define GRETH_TXEN 0x1
+#define GRETH_INT_TX 0x8
+#define GRETH_TXI 0x4
+#define GRETH_TXBD_STATUS 0x0001C000
+#define GRETH_TXBD_MORE 0x20000
+#define GRETH_TXBD_IPCS 0x40000
+#define GRETH_TXBD_TCPCS 0x80000
+#define GRETH_TXBD_UDPCS 0x100000
+#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS)
+#define GRETH_TXBD_ERR_LC 0x10000
+#define GRETH_TXBD_ERR_UE 0x4000
+#define GRETH_TXBD_ERR_AL 0x8000
+
+#define GRETH_INT_RX 0x4
+#define GRETH_RXEN 0x2
+#define GRETH_RXI 0x8
+#define GRETH_RXBD_STATUS 0xFFFFC000
+#define GRETH_RXBD_ERR_AE 0x4000
+#define GRETH_RXBD_ERR_FT 0x8000
+#define GRETH_RXBD_ERR_CRC 0x10000
+#define GRETH_RXBD_ERR_OE 0x20000
+#define GRETH_RXBD_ERR_LE 0x40000
+#define GRETH_RXBD_IP 0x80000
+#define GRETH_RXBD_IP_CSERR 0x100000
+#define GRETH_RXBD_UDP 0x200000
+#define GRETH_RXBD_UDP_CSERR 0x400000
+#define GRETH_RXBD_TCP 0x800000
+#define GRETH_RXBD_TCP_CSERR 0x1000000
+#define GRETH_RXBD_IP_FRAG 0x2000000
+#define GRETH_RXBD_MCAST 0x4000000
+
+/* Descriptor parameters */
+#define GRETH_TXBD_NUM 128
+#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1)
+#define GRETH_TX_BUF_SIZE 2048
+#define GRETH_RXBD_NUM 128
+#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1)
+#define GRETH_RX_BUF_SIZE 2048
+
+/* Buffers per page */
+#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
+#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
+
+/* How many pages are needed for buffers */
+#define GRETH_RX_BUF_PAGE_NUM (GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE)
+#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE)
+
+/* Buffer size.
+ * Gbit MAC uses tagged maximum frame size which is 1518 excluding CRC.
+ * Set to 1520 to make all buffers word aligned for non-gbit MAC.
+ */
+#define MAX_FRAME_SIZE 1520
+
+/* Flags */
+#define GRETH_FLAG_RX_CSUM 0x1
+
+/* GRETH APB registers */
+struct greth_regs {
+ u32 control;
+ u32 status;
+ u32 esa_msb;
+ u32 esa_lsb;
+ u32 mdio;
+ u32 tx_desc_p;
+ u32 rx_desc_p;
+ u32 edclip;
+ u32 hash_msb;
+ u32 hash_lsb;
+};
+
+/* GRETH buffer descriptor */
+struct greth_bd {
+ u32 stat;
+ u32 addr;
+};
+
+struct greth_private {
+ struct sk_buff *rx_skbuff[GRETH_RXBD_NUM];
+ struct sk_buff *tx_skbuff[GRETH_TXBD_NUM];
+
+ unsigned char *tx_bufs[GRETH_TXBD_NUM];
+ unsigned char *rx_bufs[GRETH_RXBD_NUM];
+
+ u16 tx_next;
+ u16 tx_last;
+ u16 tx_free;
+ u16 rx_cur;
+
+ struct greth_regs *regs; /* Address of controller registers. */
+ struct greth_bd *rx_bd_base; /* Address of Rx BDs. */
+ struct greth_bd *tx_bd_base; /* Address of Tx BDs. */
+ dma_addr_t rx_bd_base_phys;
+ dma_addr_t tx_bd_base_phys;
+
+ int irq;
+
+ struct device *dev; /* Pointer to of_device->dev */
+ struct net_device *netdev;
+ struct napi_struct napi;
+ spinlock_t devlock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ int mdio_irqs[PHY_MAX_ADDR];
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ u32 msg_enable;
+ u32 flags;
+
+ u8 phyaddr;
+ u8 multicast;
+ u8 gbit_mac;
+ u8 mdio_int_en;
+ u8 edcl;
+};
+
+#endif
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a2..5d6f13879592 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -153,7 +153,6 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -1854,17 +1853,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct dev_mc_list *mclist;
- int i;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ int i = 0;
+
+ netdev_for_each_mc_addr(mclist, dev) {
writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
ioaddr + 0x104 + i*8);
+ i++;
}
/* Clear remaining entries. */
for (; i < 64; i++)
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
}
}
-static struct pci_device_id hamachi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 689b9bd377a5..4b52c767ad05 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
+#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ae5f11c8fc13..14f01d156db9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -61,6 +61,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
+#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
@@ -248,6 +249,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
struct bpqdev *bpq;
+ struct net_device *orig_dev;
int size;
/*
@@ -282,8 +284,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
bpq = netdev_priv(dev);
+ orig_dev = dev;
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
- dev->stats.tx_dropped++;
+ orig_dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 9ee76b42668f..52b14256e2c0 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 91c5790c9581..b8bdf9d51cd4 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -48,7 +48,6 @@
#include <linux/net.h>
#include <linux/in.h>
#include <linux/if.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bitops.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 7db0a1c3216c..66e88bd59caa 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
+#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 35c936175bba..f3a96b843911 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -158,7 +158,6 @@
#include <linux/in.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e1..4daad8cd56ea 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -102,7 +102,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/eisa.h>
#include <linux/pci.h>
@@ -210,7 +209,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
-static struct pci_device_id hp100_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
@@ -2090,7 +2089,7 @@ static void hp100_set_multicast_list(struct net_device *dev)
lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
memset(&lp->hash_bytes, 0xff, 8);
- } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
@@ -2098,22 +2097,23 @@ static void hp100_set_multicast_list(struct net_device *dev)
/* set hash filter to receive all multicast packets */
memset(&lp->hash_bytes, 0xff, 8);
} else {
- int i, j, idx;
+ int i, idx;
u_char *addrs;
struct dev_mc_list *dmi;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
#endif
- for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
dev->name, addrs);
#endif
- for (j = idx = 0; j < 6; j++) {
+ for (i = idx = 0; i < 6; i++) {
idx ^= *addrs++ & 0x3f;
printk(":%02x:", idx);
}
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 3e3528ade259..b6060f7538df 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index d496b6f4a478..24724b4ad709 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index fb5e019169ee..dd873cc41c2b 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -391,11 +392,11 @@ static void emac_hash_mc(struct emac_instance *dev)
struct dev_mc_list *dmi;
int i;
- DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev->ndev) {
int slot, reg, mask;
DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
@@ -425,9 +426,9 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME;
else if (ndev->flags & IFF_ALLMULTI ||
- (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME;
- else if (ndev->mc_count > 0)
+ else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
return r;
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 18d56c6c4238..b1cbe6fdfc7a 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -34,6 +34,7 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/dcr.h>
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 2a2fc17b2878..5b3d94419fe6 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -26,6 +26,7 @@
*/
#include <linux/delay.h>
+#include <linux/slab.h>
#include "core.h"
#include <asm/dcr-regs.h>
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 8d76cb89dbd6..5b90d34c8455 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -21,6 +21,7 @@
* option) any later version.
*
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <asm/io.h>
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 17b154124943..1f038f808ab3 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -21,6 +21,7 @@
* option) any later version.
*
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <asm/io.h>
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3af112..7d6cf3340c11 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -79,7 +79,6 @@ History:
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/time.h>
@@ -87,6 +86,7 @@ History:
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -419,7 +419,7 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- for (mcptr = dev->mc_list; mcptr != NULL; mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
putcam(cams, &camcnt, mcptr->dmi_addr);
if (camcnt == 16)
break;
@@ -988,7 +988,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
/* copy out MAC address */
- for (z = 0; z < sizeof(dev->dev_addr); z++)
+ for (z = 0; z < ETH_ALEN; z++)
dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
/* print config */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a86693906ac8..cd508a8ee25b 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -49,6 +49,7 @@
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/slab.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
@@ -1062,7 +1063,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
@@ -1071,8 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist = netdev->mc_list;
- int i;
+ struct dev_mc_list *mclist;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1083,7 +1084,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
@@ -1577,7 +1578,7 @@ static struct attribute * veth_pool_attrs[] = {
NULL,
};
-static struct sysfs_ops veth_pool_ops = {
+static const struct sysfs_ops veth_pool_ops = {
.show = veth_pool_show,
.store = veth_pool_store,
};
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e9194a88..4a32bed77c71 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -30,7 +30,6 @@
*/
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/if_ether.h>
#include "e1000_mac.h"
@@ -94,6 +93,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
case E1000_DEV_ID_82576_SERDES_QUAD:
mac->type = e1000_82576;
break;
@@ -727,6 +727,34 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
}
/**
+ * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !igb_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ wr32(E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = rd32(E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ wrfl();
+ msleep(1);
+}
+
+/**
* igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
@@ -791,27 +819,12 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
- u16 eeprom_data = 0;
- if (hw->phy.media_type != e1000_media_type_internal_serdes ||
+ if (hw->phy.media_type != e1000_media_type_internal_serdes &&
igb_sgmii_active_82575(hw))
return;
- if (hw->bus.func == E1000_FUNC_0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type == e1000_82580)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
- else if (hw->bus.func == E1000_FUNC_1)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
- /*
- * If APM is not enabled in the EEPROM and management interface is
- * not enabled, then power down.
- */
- if (!(eeprom_data & E1000_NVM_APME_82575) &&
- !igb_enable_mng_pass_thru(hw)) {
+ if (!igb_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
reg = rd32(E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -826,8 +839,6 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
wrfl();
msleep(1);
}
-
- return;
}
/**
@@ -1096,9 +1107,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
- E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
@@ -1185,6 +1194,22 @@ out:
}
/**
+ * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Remove the link in the case of a PHY power down to save power, to turn off
+ * link during a driver unload, or when wake on lan is not enabled.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+ igb_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
* igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d51c9927c819..fbe1c99c193c 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -29,6 +29,8 @@
#define _E1000_82575_H_
extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
@@ -219,6 +221,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6e036ae3138f..fe6cf1b696c7 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -313,12 +313,6 @@
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
-#define IFS_MAX 80
-#define IFS_MIN 40
-#define IFS_RATIO 4
-#define IFS_STEP 10
-#define MIN_NUM_XMITS 1000
-
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -481,6 +475,7 @@
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index dbaeb5f5e0c7..82a533f5192a 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,6 +41,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
#define E1000_DEV_ID_82576_NS 0x150A
#define E1000_DEV_ID_82576_NS_SERDES 0x1518
#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
@@ -339,19 +340,12 @@ struct e1000_mac_info {
enum e1000_mac_type type;
- u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
- u32 tx_packet_delta;
u32 txcw;
- u16 current_ifs_val;
- u16 ifs_max_val;
- u16 ifs_min_val;
- u16 ifs_ratio;
- u16 ifs_step_size;
u16 mta_reg_count;
u16 uta_reg_count;
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2ad358a240bf..be8d010e4021 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1304,76 +1304,6 @@ out:
}
/**
- * igb_reset_adaptive - Reset Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Reset the Adaptive Interframe Spacing throttle to default values.
- **/
-void igb_reset_adaptive(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- if (!mac->adaptive_ifs) {
- hw_dbg("Not in Adaptive IFS mode!\n");
- goto out;
- }
-
- if (!mac->ifs_params_forced) {
- mac->current_ifs_val = 0;
- mac->ifs_min_val = IFS_MIN;
- mac->ifs_max_val = IFS_MAX;
- mac->ifs_step_size = IFS_STEP;
- mac->ifs_ratio = IFS_RATIO;
- }
-
- mac->in_ifs_mode = false;
- wr32(E1000_AIT, 0);
-out:
- return;
-}
-
-/**
- * igb_update_adaptive - Update Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Update the Adaptive Interframe Spacing Throttle value based on the
- * time between transmitted packets and time between collisions.
- **/
-void igb_update_adaptive(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- if (!mac->adaptive_ifs) {
- hw_dbg("Not in Adaptive IFS mode!\n");
- goto out;
- }
-
- if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
- if (mac->tx_packet_delta > MIN_NUM_XMITS) {
- mac->in_ifs_mode = true;
- if (mac->current_ifs_val < mac->ifs_max_val) {
- if (!mac->current_ifs_val)
- mac->current_ifs_val = mac->ifs_min_val;
- else
- mac->current_ifs_val +=
- mac->ifs_step_size;
- wr32(E1000_AIT,
- mac->current_ifs_val);
- }
- }
- } else {
- if (mac->in_ifs_mode &&
- (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
- mac->current_ifs_val = 0;
- mac->in_ifs_mode = false;
- wr32(E1000_AIT, 0);
- }
- }
-out:
- return;
-}
-
-/**
* igb_validate_mdi_setting - Verify MDI/MDIx settings
* @hw: pointer to the HW structure
*
@@ -1437,7 +1367,8 @@ out:
* igb_enable_mng_pass_thru - Enable processing of ARP's
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
**/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -1450,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
manc = rd32(E1000_MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index bca17d882417..601be99711c2 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -67,8 +67,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
void igb_put_hw_semaphore(struct e1000_hw *hw);
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
-void igb_reset_adaptive(struct e1000_hw *hw);
-void igb_update_adaptive(struct e1000_hw *hw);
bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e9bb8d..cf1f32300923 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
- if (ret_val)
- goto out;
-
- /* Set number of link attempts before downshift */
- ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
- if (ret_val)
- goto out;
- phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
- ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
out:
return ret_val;
@@ -1940,6 +1931,41 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
}
/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msleep(1);
+}
+
+/**
* igb_check_polarity_82580 - Checks the polarity.
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 555eb54bb6ed..565a6dbb3714 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -60,6 +60,8 @@ s32 igb_setup_copper_link(struct e1000_hw *hw);
s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
+void igb_power_up_phy_copper(struct e1000_hw *hw);
+void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index dd4e6ffd29f5..abb7333a1fbf 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -310,6 +310,7 @@
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b1c1eb88893f..3b772b822a5d 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -75,11 +75,14 @@ struct vf_data_storage {
u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -92,13 +95,13 @@ struct vf_data_storage {
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
+#define IGB_RX_PTHRESH 8
#define IGB_RX_HTHRESH 8
#define IGB_RX_WTHRESH 1
#define IGB_TX_PTHRESH 8
#define IGB_TX_HTHRESH 1
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 0 : 16)
+ adapter->msix_entries) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -138,6 +141,7 @@ struct igb_buffer {
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
+ u16 gso_segs;
};
/* RX */
struct {
@@ -173,7 +177,6 @@ struct igb_q_vector {
u16 itr_val;
u8 set_itr;
- u8 itr_shift;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
@@ -238,7 +241,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
}
/* board specific private data structure */
-
struct igb_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
@@ -264,12 +266,11 @@ struct igb_adapter {
unsigned long led_status;
/* TX */
- struct igb_ring *tx_ring; /* One per active queue */
- unsigned long tx_queue_len;
+ struct igb_ring *tx_ring[16];
u32 tx_timeout_count;
/* RX */
- struct igb_ring *rx_ring; /* One per active queue */
+ struct igb_ring *rx_ring[16];
int num_tx_queues;
int num_rx_queues;
@@ -354,7 +355,9 @@ extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
struct igb_buffer *);
extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
extern void igb_update_stats(struct igb_adapter *);
+extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_power_up_link(struct igb_adapter *);
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d5272650d..743038490104 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include "igb.h"
@@ -234,6 +235,24 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
+static u32 igb_get_link(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igb_has_link(adapter);
+}
+
static void igb_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -296,7 +315,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
static u32 igb_get_rx_csum(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
+ return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
@@ -306,9 +325,9 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
for (i = 0; i < adapter->num_rx_queues; i++) {
if (data)
- adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+ adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
else
- adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+ adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
}
return 0;
@@ -771,9 +790,9 @@ static int igb_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -797,10 +816,10 @@ static int igb_set_ringparam(struct net_device *netdev,
* to the tx and rx ring structs.
*/
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct igb_ring));
-
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igb_ring));
+
temp_ring[i].count = new_tx_count;
err = igb_setup_tx_resources(&temp_ring[i]);
if (err) {
@@ -812,20 +831,21 @@ static int igb_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igb_free_tx_resources(adapter->tx_ring[i]);
- memcpy(adapter->tx_ring, temp_ring,
- adapter->num_tx_queues * sizeof(struct igb_ring));
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
- if (new_rx_count != adapter->rx_ring->count) {
- memcpy(temp_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct igb_ring));
-
+ if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igb_ring));
+
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
@@ -838,11 +858,12 @@ static int igb_set_ringparam(struct net_device *netdev,
}
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igb_free_rx_resources(adapter->rx_ring[i]);
- memcpy(adapter->rx_ring, temp_ring,
- adapter->num_rx_queues * sizeof(struct igb_ring));
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
@@ -1704,6 +1725,9 @@ static void igb_diag_test(struct net_device *netdev,
dev_info(&adapter->pdev->dev, "offline testing starting\n");
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (igb_link_test(adapter, &data[4]))
@@ -1727,6 +1751,8 @@ static void igb_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1745,9 +1771,14 @@ static void igb_diag_test(struct net_device *netdev,
dev_open(netdev);
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* Online tests */
- if (igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* PHY is powered down when interface is down */
+ if (!netif_carrier_ok(netdev)) {
+ data[4] = 0;
+ } else {
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
/* Online tests aren't run; pass by default */
data[0] = 0;
@@ -1783,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
retval = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* quad port adapters only support WoL on port A */
if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
wol->supported = 0;
@@ -1795,7 +1827,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
@@ -1812,7 +1844,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct igb_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
@@ -1835,15 +1868,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
}
static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
if (igb_wol_exclusion(adapter, wol) ||
@@ -1861,6 +1894,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
@@ -2005,12 +2040,12 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
data[i] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
data[i] = queue_stat[k];
}
@@ -2074,7 +2109,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_msglevel = igb_get_msglevel,
.set_msglevel = igb_set_msglevel,
.nway_reset = igb_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = igb_get_link,
.get_eeprom_len = igb_get_eeprom_len,
.get_eeprom = igb_get_eeprom,
.set_eeprom = igb_set_eeprom,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0e128d..c9baa2aa98cd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -32,6 +32,7 @@
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
+#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
@@ -60,7 +61,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -72,6 +73,7 @@ static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
@@ -133,6 +135,12 @@ static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -312,31 +320,35 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
*/
if (adapter->vfs_allocated_count) {
for (; i < adapter->rss_queues; i++)
- adapter->rx_ring[i].reg_idx = rbase_offset +
- Q_IDX_82576(i);
+ adapter->rx_ring[i]->reg_idx = rbase_offset +
+ Q_IDX_82576(i);
for (; j < adapter->rss_queues; j++)
- adapter->tx_ring[j].reg_idx = rbase_offset +
- Q_IDX_82576(j);
+ adapter->tx_ring[j]->reg_idx = rbase_offset +
+ Q_IDX_82576(j);
}
case e1000_82575:
case e1000_82580:
default:
for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = rbase_offset + i;
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j].reg_idx = rbase_offset + j;
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
break;
}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
adapter->num_rx_queues = 0;
adapter->num_tx_queues = 0;
}
@@ -350,20 +362,13 @@ static void igb_free_queues(struct igb_adapter *adapter)
**/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
+ struct igb_ring *ring;
int i;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct igb_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct igb_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err;
-
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *ring = &(adapter->tx_ring[i]);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
@@ -371,10 +376,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+ adapter->tx_ring[i] = ring;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = &(adapter->rx_ring[i]);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
@@ -384,6 +392,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+ adapter->rx_ring[i] = ring;
}
igb_cache_ring_register(adapter);
@@ -421,6 +430,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
@@ -496,6 +507,12 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
BUG();
break;
}
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
}
/**
@@ -553,11 +570,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
adapter->eims_enable_mask |= adapter->eims_other;
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- igb_assign_vector(q_vector, vector++);
- adapter->eims_enable_mask |= q_vector->eims_value;
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igb_assign_vector(adapter->q_vector[i], vector++);
wrfl();
}
@@ -637,6 +651,8 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
adapter->q_vector[v_idx] = NULL;
+ if (!q_vector)
+ continue;
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -674,7 +690,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
/* start with one vector for every rx queue */
numvecs = adapter->num_rx_queues;
- /* if tx handler is seperate add 1 for every tx queue */
+ /* if tx handler is separate add 1 for every tx queue */
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
@@ -748,33 +764,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
q_vector->itr_val = IGB_START_ITR;
- q_vector->set_itr = 1;
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
return 0;
err_out:
- while (v_idx) {
- v_idx--;
- q_vector = adapter->q_vector[v_idx];
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- adapter->q_vector[v_idx] = NULL;
- }
+ igb_free_q_vectors(adapter);
return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
- struct igb_q_vector *q_vector;
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector = adapter->q_vector[v_idx];
- q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+ q_vector->rx_ring = adapter->rx_ring[ring_idx];
q_vector->rx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->rx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -784,10 +791,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
- struct igb_q_vector *q_vector;
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector = adapter->q_vector[v_idx];
- q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+ q_vector->tx_ring = adapter->tx_ring[ring_idx];
q_vector->tx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->tx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -877,7 +883,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
int err = 0;
if (adapter->msix_entries) {
@@ -909,20 +914,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
} else {
- switch (hw->mac.type) {
- case e1000_82575:
- wr32(E1000_MSIXBM(0),
- (E1000_EICR_RX_QUEUE0 |
- E1000_EICR_TX_QUEUE0 |
- E1000_EIMS_OTHER));
- break;
- case e1000_82580:
- case e1000_82576:
- wr32(E1000_IVAR0, E1000_IVAR_VALID);
- break;
- default:
- break;
- }
+ igb_assign_vector(adapter->q_vector[0], 0);
}
if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1111,14 +1103,34 @@ static void igb_configure(struct igb_adapter *adapter)
* at least 1 descriptor unused to make sure
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = &adapter->rx_ring[i];
+ struct igb_ring *ring = adapter->rx_ring[i];
igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
}
+}
-
- adapter->tx_queue_len = netdev->tx_queue_len;
+/**
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_up_phy_copper(&adapter->hw);
+ else
+ igb_power_up_serdes_link_82575(&adapter->hw);
}
+/**
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_down_phy_copper_82575(&adapter->hw);
+ else
+ igb_shutdown_serdes_link_82575(&adapter->hw);
+}
/**
* igb_up - Open the interface and prepare it to handle traffic
@@ -1140,6 +1152,8 @@ int igb_up(struct igb_adapter *adapter)
}
if (adapter->msix_entries)
igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
rd32(E1000_ICR);
@@ -1197,7 +1211,6 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
/* record the stats before reset*/
@@ -1306,13 +1319,8 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- if (mac->type < e1000_82576) {
- fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
- } else {
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
- fc->low_water = fc->high_water - 16;
- }
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
fc->current_mode = fc->requested_mode;
@@ -1343,12 +1351,14 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_PCIEMISC,
reg & ~E1000_PCIEMISC_LX_DECISION);
}
+ if (!netif_running(adapter->netdev))
+ igb_power_down_link(adapter);
+
igb_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
- igb_reset_adaptive(hw);
igb_get_phy_info(hw);
}
@@ -1367,6 +1377,10 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_register = igb_vlan_rx_register,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
@@ -1487,7 +1501,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_get_bus_info_pcie(hw);
hw->phy.autoneg_wait_to_complete = false;
- hw->mac.adaptive_ifs = true;
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -1599,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
@@ -1721,9 +1735,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- if (!igb_check_reset_block(hw))
- igb_reset_phy(hw);
-
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
@@ -1999,7 +2010,7 @@ static int igb_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- /* e1000_power_up_phy(adapter); */
+ igb_power_up_link(adapter);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -2041,7 +2052,7 @@ static int igb_open(struct net_device *netdev)
err_req_irq:
igb_release_hw_control(adapter);
- /* e1000_power_down_phy(adapter); */
+ igb_power_down_link(adapter);
igb_free_all_rx_resources(adapter);
err_setup_rx:
igb_free_all_tx_resources(adapter);
@@ -2129,19 +2140,19 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = igb_setup_tx_resources(&adapter->tx_ring[i]);
+ err = igb_setup_tx_resources(adapter->tx_ring[i]);
if (err) {
dev_err(&pdev->dev,
"Allocation for Tx Queue %u failed\n", i);
for (i--; i >= 0; i--)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ igb_free_tx_resources(adapter->tx_ring[i]);
break;
}
}
for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
int r_idx = i % adapter->num_tx_queues;
- adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
+ adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
}
return err;
}
@@ -2224,7 +2235,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+ igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
@@ -2282,12 +2293,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = igb_setup_rx_resources(&adapter->rx_ring[i]);
+ err = igb_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
dev_err(&pdev->dev,
"Allocation for Rx Queue %u failed\n", i);
for (i--; i >= 0; i--)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ igb_free_rx_resources(adapter->rx_ring[i]);
break;
}
}
@@ -2494,7 +2505,8 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
wr32(E1000_RLPML, max_frame_size);
}
-static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -2507,8 +2519,11 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
- E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
/* clear all bits that might not be set */
vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@ -2575,11 +2590,14 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ /* Only set Drop Enable if we are supporting multiple queues */
+ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
+ srrctl |= E1000_SRRCTL_DROP_EN;
wr32(E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */
- igb_set_vmolr(adapter, reg_idx & 0x7);
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
/* enable receive descriptor fetching */
rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2611,7 +2629,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -2648,7 +2666,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ igb_free_tx_resources(adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
@@ -2715,7 +2733,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_clean_tx_ring(&adapter->tx_ring[i]);
+ igb_clean_tx_ring(adapter->tx_ring[i]);
}
/**
@@ -2752,7 +2770,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2816,7 +2834,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_clean_rx_ring(&adapter->rx_ring[i]);
+ igb_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -2858,38 +2876,30 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr = netdev->mc_list;
+ struct dev_mc_list *mc_ptr;
u8 *mta_list;
- u32 vmolr = 0;
int i;
- if (!netdev->mc_count) {
+ if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
igb_update_mc_addr_list(hw, NULL, 0);
igb_restore_vf_multicasts(adapter);
return 0;
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
- /* set vmolr receive overflow multicast bit */
- vmolr |= E1000_VMOLR_ROMPE;
-
/* The shared function expects a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev->mc_count;
+ return netdev_mc_count(netdev);
}
/**
@@ -2910,12 +2920,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev->uc.count > rar_entries)
+ if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (netdev->uc.count && rar_entries) {
+ if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- list_for_each_entry(ha, &netdev->uc.list, list) {
+
+ netdev_for_each_uc_addr(ha, netdev) {
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
@@ -3019,7 +3030,7 @@ static void igb_update_phy_info(unsigned long data)
* igb_has_link - check shared code for link and determine up/down
* @adapter: pointer to driver private info
**/
-static bool igb_has_link(struct igb_adapter *adapter)
+bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -3093,17 +3104,13 @@ static void igb_watchdog_task(struct work_struct *work)
((ctrl & E1000_CTRL_RFCE) ? "RX" :
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
- /* tweak tx_queue_len according to speed/duplex and
- * adjust the timeout factor */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 14;
break;
case SPEED_100:
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
@@ -3136,10 +3143,9 @@ static void igb_watchdog_task(struct work_struct *work)
}
igb_update_stats(adapter);
- igb_update_adaptive(hw);
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *tx_ring = &adapter->tx_ring[i];
+ struct igb_ring *tx_ring = adapter->tx_ring[i];
if (!netif_carrier_ok(netdev)) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
@@ -3240,6 +3246,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
else
new_val = avg_wire_size / 2;
+ /* when in itr mode 3 do not exceed 20K ints/sec */
+ if (adapter->rx_itr_setting == 3 && new_val < 196)
+ new_val = 196;
+
set_itr_val:
if (new_val != q_vector->itr_val) {
q_vector->itr_val = new_val;
@@ -3335,13 +3345,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
adapter->rx_itr = igb_update_itr(adapter,
adapter->rx_itr,
- adapter->rx_ring->total_packets,
- adapter->rx_ring->total_bytes);
+ q_vector->rx_ring->total_packets,
+ q_vector->rx_ring->total_bytes);
adapter->tx_itr = igb_update_itr(adapter,
adapter->tx_itr,
- adapter->tx_ring->total_packets,
- adapter->tx_ring->total_bytes);
+ q_vector->tx_ring->total_packets,
+ q_vector->tx_ring->total_bytes);
current_itr = max(adapter->rx_itr, adapter->tx_itr);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
@@ -3364,10 +3374,10 @@ static void igb_set_itr(struct igb_adapter *adapter)
}
set_itr_now:
- adapter->rx_ring->total_bytes = 0;
- adapter->rx_ring->total_packets = 0;
- adapter->tx_ring->total_bytes = 0;
- adapter->tx_ring->total_packets = 0;
+ q_vector->rx_ring->total_bytes = 0;
+ q_vector->rx_ring->total_packets = 0;
+ q_vector->tx_ring->total_bytes = 0;
+ q_vector->tx_ring->total_packets = 0;
if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk
@@ -3407,8 +3417,8 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
int err;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- *hdr_len = 0;
+ u32 mss_l4len_idx;
+ u8 l4len;
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
@@ -3427,7 +3437,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3589,6 +3599,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -3610,10 +3621,10 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -3627,14 +3638,12 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
- count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
+ while (count--) {
+ if (i == 0)
+ i = tx_ring->count;
i--;
- if (i < 0)
- i += tx_ring->count;
buffer_info = &tx_ring->buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
@@ -3643,7 +3652,7 @@ dma_error:
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ u32 tx_flags, int count, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc;
@@ -3731,7 +3740,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
return 0;
}
-static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
if (igb_desc_unused(tx_ring) >= size)
return 0;
@@ -3742,10 +3751,10 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
int tso = 0, count;
+ u32 tx_flags = 0;
+ u16 first;
+ u8 hdr_len = 0;
union skb_shared_tx *shtx = skb_tx(skb);
/* need: 1 descriptor per page,
@@ -3926,7 +3935,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+ adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
if (netif_running(netdev))
igb_up(adapter);
@@ -3948,7 +3957,7 @@ void igb_update_stats(struct igb_adapter *adapter)
struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- u32 rnbc;
+ u32 reg, mpc;
u16 phy_tmp;
int i;
u64 bytes, packets;
@@ -3968,10 +3977,11 @@ void igb_update_stats(struct igb_adapter *adapter)
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
- adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+ struct igb_ring *ring = adapter->rx_ring[i];
+ ring->rx_stats.drops += rqdpc_tmp;
net_stats->rx_fifo_errors += rqdpc_tmp;
- bytes += adapter->rx_ring[i].rx_stats.bytes;
- packets += adapter->rx_ring[i].rx_stats.packets;
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
}
net_stats->rx_bytes = bytes;
@@ -3980,8 +3990,9 @@ void igb_update_stats(struct igb_adapter *adapter)
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- bytes += adapter->tx_ring[i].tx_stats.bytes;
- packets += adapter->tx_ring[i].tx_stats.packets;
+ struct igb_ring *ring = adapter->tx_ring[i];
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
@@ -4004,7 +4015,9 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.symerrs += rd32(E1000_SYMERRS);
adapter->stats.sec += rd32(E1000_SEC);
- adapter->stats.mpc += rd32(E1000_MPC);
+ mpc = rd32(E1000_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
adapter->stats.scc += rd32(E1000_SCC);
adapter->stats.ecol += rd32(E1000_ECOL);
adapter->stats.mcc += rd32(E1000_MCC);
@@ -4019,9 +4032,7 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.gptc += rd32(E1000_GPTC);
adapter->stats.gotc += rd32(E1000_GOTCL);
rd32(E1000_GOTCH); /* clear GOTCL */
- rnbc = rd32(E1000_RNBC);
- adapter->stats.rnbc += rnbc;
- net_stats->rx_fifo_errors += rnbc;
+ adapter->stats.rnbc += rd32(E1000_RNBC);
adapter->stats.ruc += rd32(E1000_RUC);
adapter->stats.rfc += rd32(E1000_RFC);
adapter->stats.rjc += rd32(E1000_RJC);
@@ -4039,15 +4050,17 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.mptc += rd32(E1000_MPTC);
adapter->stats.bptc += rd32(E1000_BPTC);
- /* used for adaptive IFS */
- hw->mac.tx_packet_delta = rd32(E1000_TPT);
- adapter->stats.tpt += hw->mac.tx_packet_delta;
- hw->mac.collision_delta = rd32(E1000_COLC);
- adapter->stats.colc += hw->mac.collision_delta;
+ adapter->stats.tpt += rd32(E1000_TPT);
+ adapter->stats.colc += rd32(E1000_COLC);
adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
- adapter->stats.rxerrc += rd32(E1000_RXERRC);
- adapter->stats.tncrs += rd32(E1000_TNCRS);
+ /* read internal phy specific stats */
+ reg = rd32(E1000_CTRL_EXT);
+ if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+ adapter->stats.rxerrc += rd32(E1000_RXERRC);
+ adapter->stats.tncrs += rd32(E1000_TNCRS);
+ }
+
adapter->stats.tsctc += rd32(E1000_TSCTC);
adapter->stats.tsctfc += rd32(E1000_TSCTFC);
@@ -4110,6 +4123,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4139,6 +4155,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
static void igb_write_itr(struct igb_q_vector *q_vector)
{
+ struct igb_adapter *adapter = q_vector->adapter;
u32 itr_val = q_vector->itr_val & 0x7FFC;
if (!q_vector->set_itr)
@@ -4147,8 +4164,8 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
if (!itr_val)
itr_val = 0x4;
- if (q_vector->itr_shift)
- itr_val |= itr_val << q_vector->itr_shift;
+ if (adapter->hw.mac.type == e1000_82575)
+ itr_val |= itr_val << 16;
else
itr_val |= 0x8000000;
@@ -4225,9 +4242,8 @@ static void igb_setup_dca(struct igb_adapter *adapter)
wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- q_vector->cpu = -1;
- igb_update_dca(q_vector);
+ adapter->q_vector[i]->cpu = -1;
+ igb_update_dca(adapter->q_vector[i]);
}
}
@@ -4501,10 +4517,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
reg |= size;
wr32(E1000_VMOLR(vf), reg);
}
- return 0;
}
}
- return -1;
+ return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -4517,15 +4580,21 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear all flags */
- adapter->vf_data[vf].flags = 0;
+ /* clear flags */
+ adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */
- igb_set_vmolr(adapter, vf);
+ igb_set_vmolr(adapter, vf, true);
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -4539,7 +4608,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
/* generate a new mac address as we were hotplug removed/added */
- random_ether_addr(vf_mac);
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -4652,7 +4722,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
case E1000_VF_SET_VLAN:
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ retval = -1;
+ else
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -4733,6 +4806,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(q_vector);
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4772,6 +4848,9 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4935,7 +5014,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
if (skb) {
unsigned int segs, bytecount;
/* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
+ segs = buffer_info->gso_segs;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) +
skb->len;
@@ -5025,7 +5104,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
{
struct igb_adapter *adapter = q_vector->adapter;
- if (vlan_tag)
+ if (vlan_tag && adapter->vlgrp)
vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
vlan_tag, skb);
else
@@ -5753,7 +5832,9 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
- igb_shutdown_serdes_link_82575(hw);
+ igb_power_down_link(adapter);
+ else
+ igb_power_up_link(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
@@ -5793,6 +5874,7 @@ static int igb_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -5810,8 +5892,6 @@ static int igb_resume(struct pci_dev *pdev)
return -ENOMEM;
}
- /* e1000_power_up_phy(adapter); */
-
igb_reset(adapter);
/* let the f/w know that the h/w is now under the control of the
@@ -5920,6 +6000,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6008,6 +6089,43 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
return 0;
}
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+ return 0;
+}
+
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b29d222..debeee2dc717 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@ struct igbvf_adapter {
struct igbvf_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
- unsigned long tx_queue_len;
unsigned int restart_queue;
u32 txd_cmd;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index e9dd95f136aa..1b1edad1eb5e 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -35,6 +35,7 @@
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
@@ -1304,8 +1305,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
- adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
@@ -1403,8 +1402,8 @@ static void igbvf_set_multi(struct net_device *netdev)
u8 *mta_list = NULL;
int i;
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list) {
dev_err(&adapter->pdev->dev,
"failed to allocate multicast filter list\n");
@@ -1413,15 +1412,9 @@ static void igbvf_set_multi(struct net_device *netdev)
}
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -1530,7 +1523,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
/* record the stats before reset*/
@@ -1863,21 +1855,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
&adapter->link_duplex);
igbvf_print_link_info(adapter);
- /*
- * tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor
- */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = 0;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = 0;
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
@@ -1963,7 +1949,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -2117,6 +2103,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = false;
buffer_info->dma = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
@@ -2126,6 +2113,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -2146,7 +2134,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2150,14 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
igbvf_put_txbuf(adapter, buffer_info);
}
@@ -2608,11 +2595,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
- dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
- /* MAC address */
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
@@ -2763,7 +2746,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
random_ether_addr(hw->mac.addr);
} else {
err = hw->mac.ops.read_mac_addr(hw);
@@ -2777,11 +2761,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
err = -EIO;
goto err_hw_init;
}
@@ -2883,7 +2864,7 @@ static struct pci_error_handlers igbvf_err_handler = {
.resume = igbvf_io_resume,
};
-static struct pci_device_id igbvf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
{ } /* terminate list */
};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c2..8f6197d647c0 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -44,6 +44,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
@@ -1383,7 +1384,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
*/
}
-static struct pci_device_id ioc3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
@@ -1664,11 +1665,10 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
- int i;
netif_stop_queue(dev); /* Lock out others. */
@@ -1681,16 +1681,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addr = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addr & 1))
continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a0..639bf9fb0279 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -22,6 +22,7 @@
*/
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>
@@ -88,17 +89,15 @@ static const char *ipg_brand_name[] = {
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "D-Link NIC",
"D-Link NIC IP1000A"
};
-static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
{ PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4000), 4 },
- { PCI_VDEVICE(DLINK, 0x4020), 5 },
+ { PCI_VDEVICE(DLINK, 0x4020), 4 },
{ 0, }
};
@@ -585,11 +584,11 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
receivemode = IPG_RM_RECEIVEALLFRAMES;
} else if ((dev->flags & IFF_ALLMULTI) ||
((dev->flags & IFF_MULTICAST) &&
- (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
/* NIC to be configured to receive all multicast
* frames. */
receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
/* NIC to be configured to receive selected
* multicast addresses. */
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
@@ -610,8 +609,7 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- for (mc_list_ptr = dev->mc_list;
- mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
+ netdev_for_each_mc_addr(mc_list_ptr, dev) {
/* Calculate CRC result for each multicast address. */
hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
ETH_ALEN);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f76384221422..af10e97345ce 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
comment "Dongle support"
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable the SIR function on SuperH UART
+ devices.
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c8..e030d47e2793 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
# dongle drivers for SIR drivers
obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 12c7b006f767..28992c815cba 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -22,6 +22,7 @@
********************************************************************/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -29,7 +30,6 @@
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 9b2eebdbb25b..b5cbd39d0685 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -36,6 +36,7 @@
#include <asm/pb1000.h>
#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
#include <asm/db1x00.h>
+#include <asm/mach-db1x00/bcsr.h>
#else
#error au1k_ir: unsupported board
#endif
@@ -66,10 +67,6 @@ static char version[] __devinitdata =
#define RUN_AT(x) (jiffies + (x))
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
-static BCSR * const bcsr = (BCSR *)0xAE000000;
-#endif
-
static DEFINE_SPINLOCK(ir_lock);
/*
@@ -282,9 +279,8 @@ static int au1k_irda_net_init(struct net_device *dev)
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
/* power on */
- bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
- bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
- au_sync();
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
+ BCSR_RESETS_IRDA_MODE_FULL);
#endif
return 0;
@@ -720,14 +716,14 @@ au1k_irda_set_speed(struct net_device *dev, int speed)
if (speed == 4000000) {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr->resets |= BCSR_RESETS_FIR_SEL;
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
#else /* Pb1000 and Pb1100 */
writel(1<<13, CPLD_AUX1);
#endif
}
else {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
#else /* Pb1000 and Pb1100 */
writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
#endif
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index dac71b1f4f9b..b54a6f08db45 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d5572..b7e6625ca75e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
-static struct pci_device_id toshoboe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index e8e33bb9d876..2c9b3af16612 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1651,6 +1651,8 @@ static int irda_usb_probe(struct usb_interface *intf,
self->rx_urb = kcalloc(self->max_rx_urb, sizeof(struct urb *),
GFP_KERNEL);
+ if (!self->rx_urb)
+ goto err_free_net;
for (i = 0; i < self->max_rx_urb; i++) {
self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
@@ -1783,6 +1785,8 @@ err_out_2:
err_out_1:
for (i = 0; i < self->max_rx_urb; i++)
usb_free_urb(self->rx_urb[i]);
+ kfree(self->rx_urb);
+err_free_net:
free_netdev(net);
err_out:
return ret;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 20f9bc626688..ee1dde52e8fc 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2413295ebd90..e30cdbb14745 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -43,6 +43,7 @@
********************************************************************/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -50,7 +51,6 @@
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 84db145d2b59..1a54f6bb68c5 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index c412e8026173..1dcdce0631aa 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -331,7 +331,7 @@ static int sa1100_irda_resume(struct platform_device *pdev)
* If we missed a speed change, initialise at the new speed
* directly. It is debatable whether this is actually
* required, but in the interests of continuing from where
- * we left off it is desireable. The converse argument is
+ * we left off it is desirable. The converse argument is
* that we should re-negotiate at 9600 baud again.
*/
if (si->newspeed) {
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 000000000000..0745581c4b5e
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,824 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0_*/
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
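+/*
+ * Sanity check of the CRC engine at open time: push the fixed byte
+ * sequence 0xCC 0xF5 0xF1 0xA7 through it and verify that the engine
+ * counted four bytes and produced the expected output 0x51DF before
+ * the port is used.
+ */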
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int i, index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+ /* the IrDA clock cannot be set above peripheral_clk */
+ for (i = 0;
+ freq_table[i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ u32 freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ /* skip rates above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = i;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
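+/* round a x10000 fixed-point error-correction value to the nearest integer */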
+#define ERR_ROUNDING(a) ((a + 5000) / 10000)
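+
+/*
+ * Worked example for the CALCULATION comments in sh_sir_set_baudrate()
+ * below, assuming a hypothetical sclk of 3686400 Hz (2 x 1.8432 MHz;
+ * whether that exact rate is available depends on the SoC frequency
+ * table) and the 9600 bps target:
+ *
+ *   IrDA side:  irbc = 3686400 / 1843200 = 2, no correction needed,
+ *               so IRIF_SIR1 = 0 and IRIF_SIR2 = irbc - 1 = 1
+ *               check: 3686400 / (0 + (1 + 1)) = 1843200
+ *
+ *   UART side:  uabc = 3686400 / 9600 = 384, again no correction,
+ *               so IRIF_UART6 = 0 and IRIF_UART7 = (384 / 16) - 1 = 23
+ *               check: 3686400 / (0 + (23 + 1) * 16) = 9600 bps
+ */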
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0, 625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+ * it supports only 9600 bps for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+ dev_err(dev, "un-supported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (!clk) {
+ dev_err(dev, "can not get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
+ */
+
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
+ */
+
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already existed.");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+ /* data get */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+ /* read the data register to clear the error condition */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+ self->ndev->last_rx = jiffies;
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap)
+ goto open_err;
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stoped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ void __iomem *base;
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ self = netdev_priv(ndev);
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->membase = base;
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ if (err) {
+ dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = __devexit_p(sh_sir_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_sir_init(void)
+{
+ return platform_driver_register(&sh_sir_driver);
+}
+
+static void __exit sh_sir_exit(void)
+{
+ platform_driver_unregister(&sh_sir_driver);
+}
+
+module_init(sh_sir_init);
+module_exit(sh_sir_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 4b2a1a9eac2a..de91cd14016b 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8f7d0d146f24..6af84d88cd03 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -48,13 +48,13 @@
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
#include <linux/pnp.h>
#include <linux/platform_device.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd5453..b0a6cd815be1 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -45,11 +45,11 @@ F02 Oct/28/02: Add SB device ID for 3147 and 3177.
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
}
}
-static struct pci_device_id via_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76a..209d4bcfaced 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
static /* const */ char drivername[] = DRIVER_NAME;
-static struct pci_device_id vlsi_irda_table [] = {
+static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
{
.class = PCI_CLASS_WIRELESS_IRDA << 8,
.class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
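
The via-ircc and vlsi_ir conversions above (and the ixgb one later in this series) swap open-coded PCI ID tables for DEFINE_PCI_DEVICE_TABLE(). In this kernel generation the macro is a thin wrapper that only adds const and the __devinitconst section annotation, roughly:

	/* include/linux/pci.h (paraphrased for this era) */
	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst

The table entries themselves are unchanged; only the storage qualifiers move into the macro.
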
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 551810fd2976..cb0cb758be64 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -46,10 +46,10 @@
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -65,7 +65,6 @@
#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
-#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200
@@ -533,25 +532,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
-#ifdef CONFIG_USE_INTERNAL_TIMER
- if (mtt > 50) {
- /* Adjust for timer resolution */
- mtt /= 1000+1;
-
- /* Setup timer */
- switch_bank(iobase, SET4);
- outb(mtt & 0xff, iobase+TMRL);
- outb((mtt >> 8) & 0x0f, iobase+TMRH);
-
- /* Start timer */
- outb(IR_MSL_EN_TMR, iobase+IR_MSL);
- self->io.direction = IO_XMIT;
-
- /* Enable timer interrupt */
- switch_bank(iobase, SET0);
- outb(ICR_ETMRI, iobase+ICR);
- } else {
-#endif
IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
if (mtt)
udelay(mtt);
@@ -560,9 +540,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
switch_bank(iobase, SET0);
outb(ICR_EDMAI, iobase+ICR);
w83977af_dma_write(self, iobase);
-#ifdef CONFIG_USE_INTERNAL_TIMER
- }
-#endif
} else {
self->tx_buff.data = self->tx_buff.head;
self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
@@ -876,20 +853,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
/* Check if we have transferred all data to memory */
switch_bank(iobase, SET0);
if (inb(iobase+USR) & USR_RDR) {
-#ifdef CONFIG_USE_INTERNAL_TIMER
- /* Put this entry back in fifo */
- st_fifo->head--;
- st_fifo->len++;
- st_fifo->entries[st_fifo->head].status = status;
- st_fifo->entries[st_fifo->head].len = len;
-
- /* Restore set register */
- outb(set, iobase+SSR);
-
- return FALSE; /* I'll be back! */
-#else
udelay(80); /* Should be enough!? */
-#endif
}
skb = dev_alloc_skb(len+1);
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
deleted file mode 100644
index 04d0502726c0..000000000000
--- a/drivers/net/isa-skeleton.c
+++ /dev/null
@@ -1,718 +0,0 @@
-/* isa-skeleton.c: A network driver outline for linux.
- *
- * Written 1993-94 by Donald Becker.
- *
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * The author may be reached as becker@scyld.com, or C/O
- * Scyld Computing Corporation
- * 410 Severn Ave., Suite 210
- * Annapolis MD 21403
- *
- * This file is an outline for writing a network device driver for the
- * the Linux operating system.
- *
- * To write (or understand) a driver, have a look at the "loopback.c" file to
- * get a feel of what is going on, and then use the code below as a skeleton
- * for the new driver.
- *
- */
-
-static const char *version =
- "isa-skeleton.c:v1.51 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-/*
- * Sources:
- * List your sources of programming information to document that
- * the driver is your own creation, and give due credit to others
- * that contributed to the work. Remember that GNU project code
- * cannot use proprietary or trade secret information. Interface
- * definitions are generally considered non-copyrightable to the
- * extent that the same names and structures must be used to be
- * compatible.
- *
- * Finally, keep in mind that the Linux kernel is has an API, not
- * ABI. Proprietary object-code-only distributions are not permitted
- * under the GPL.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-/*
- * The name of the card. Is used for messages and in the requests for
- * io regions, irqs and dma channels
- */
-static const char* cardname = "netcard";
-
-/* First, a few definitions that the brave might change. */
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 2
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define NETCARD_IO_EXTENT 32
-
-#define MY_TX_TIMEOUT ((400*HZ)/1000)
-
-/* Information that need to be kept for each board. */
-struct net_local {
- struct net_device_stats stats;
- long open_time; /* Useless example local info. */
-
- /* Tx control lock. This protects the transmit buffer ring
- * state along with the "tx full" state of the driver. This
- * means all netif_queue flow control actions are protected
- * by this lock as well.
- */
- spinlock_t lock;
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00
-#define SA_ADDR1 0x42
-#define SA_ADDR2 0x65
-
-/* Index to functions, as function prototypes. */
-
-static int netcard_probe1(struct net_device *dev, int ioaddr);
-static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void net_tx_timeout(struct net_device *dev);
-
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) 1
-static void hardware_send_packet(short ioaddr, char *buf, int length);
-static void chipset_init(struct net_device *dev, int startp);
-
-/*
- * Check for a network adaptor of this type, and return '0' iff one exists.
- * If dev->base_addr == 0, probe all likely locations.
- * If dev->base_addr == 1, always return failure.
- * If dev->base_addr == 2, allocate space for the device and return success
- * (detachable devices only).
- */
-static int __init do_netcard_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return netcard_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; netcard_portlist[i]; i++) {
- int ioaddr = netcard_portlist[i];
- if (netcard_probe1(dev, ioaddr) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-#ifdef jumpered_dma
- free_dma(dev->dma);
-#endif
-#ifdef jumpered_interrupts
- free_irq(dev->irq, dev);
-#endif
- release_region(dev->base_addr, NETCARD_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init netcard_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_netcard_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops netcard_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_get_stats = net_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
- .ndo_tx_timeout = net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-static int __init netcard_probe1(struct net_device *dev, int ioaddr)
-{
- struct net_local *np;
- static unsigned version_printed;
- int i;
- int err = -ENODEV;
-
- /* Grab the region so that no one else tries to probe our ioports. */
- if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
- return -EBUSY;
-
- /*
- * For ethernet adaptors the first three octets of the station address
- * contains the manufacturer's unique code. That might be a good probe
- * method. Ideally you would add additional checks.
- */
- if (inb(ioaddr + 0) != SA_ADDR0 ||
- inb(ioaddr + 1) != SA_ADDR1 ||
- inb(ioaddr + 2) != SA_ADDR2)
- goto out;
-
- if (net_debug && version_printed++ == 0)
- printk(KERN_DEBUG "%s", version);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cardname, ioaddr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = ioaddr;
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
-
- printk("%pM", dev->dev_addr);
-
- err = -EAGAIN;
-#ifdef jumpered_interrupts
- /*
- * If this board has jumpered interrupts, allocate the interrupt
- * vector now. There is no point in waiting since no other device
- * can use the interrupt, and this marks the irq as busy. Jumpered
- * interrupts are typically not reported by the boards, and we must
- * used autoIRQ to find them.
- */
-
- if (dev->irq == -1)
- ; /* Do nothing: a user-level program will set it. */
- else if (dev->irq < 2) { /* "Auto-IRQ" */
- unsigned long irq_mask = probe_irq_on();
- /* Trigger an interrupt here. */
-
- dev->irq = probe_irq_off(irq_mask);
- if (net_debug >= 2)
- printk(" autoirq is %d", dev->irq);
- } else if (dev->irq == 2)
- /*
- * Fixup for users that don't know that IRQ 2 is really
- * IRQ9, or don't know which one to set.
- */
- dev->irq = 9;
-
- {
- int irqval = request_irq(dev->irq, net_interrupt, 0, cardname, dev);
- if (irqval) {
- printk("%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name, dev->irq, irqval);
- goto out;
- }
- }
-#endif /* jumpered interrupt */
-#ifdef jumpered_dma
- /*
- * If we use a jumpered DMA channel, that should be probed for and
- * allocated here as well. See lance.c for an example.
- */
- if (dev->dma == 0) {
- if (request_dma(dev->dma, cardname)) {
- printk("DMA %d allocation failed.\n", dev->dma);
- goto out1;
- } else
- printk(", assigned DMA %d.\n", dev->dma);
- } else {
- short dma_status, new_dma_status;
-
- /* Read the DMA channel status registers. */
- dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
- (inb(DMA2_STAT_REG) & 0xf0);
- /* Trigger a DMA request, perhaps pause a bit. */
- outw(0x1234, ioaddr + 8);
- /* Re-read the DMA status registers. */
- new_dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
- (inb(DMA2_STAT_REG) & 0xf0);
- /*
- * Eliminate the old and floating requests,
- * and DMA4 the cascade.
- */
- new_dma_status ^= dma_status;
- new_dma_status &= ~0x10;
- for (i = 7; i > 0; i--)
- if (test_bit(i, &new_dma_status)) {
- dev->dma = i;
- break;
- }
- if (i <= 0) {
- printk("DMA probe failed.\n");
- goto out1;
- }
- if (request_dma(dev->dma, cardname)) {
- printk("probed DMA %d allocation failed.\n", dev->dma);
- goto out1;
- }
- }
-#endif /* jumpered DMA */
-
- np = netdev_priv(dev);
- spin_lock_init(&np->lock);
-
- dev->netdev_ops = &netcard_netdev_ops;
- dev->watchdog_timeo = MY_TX_TIMEOUT;
-
- err = register_netdev(dev);
- if (err)
- goto out2;
- return 0;
-out2:
-#ifdef jumpered_dma
- free_dma(dev->dma);
-#endif
-out1:
-#ifdef jumpered_interrupts
- free_irq(dev->irq, dev);
-#endif
-out:
- release_region(base_addr, NETCARD_IO_EXTENT);
- return err;
-}
-
-static void net_tx_timeout(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
-
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
-
- /* Try to restart the adaptor. */
- chipset_init(dev, 1);
-
- np->stats.tx_errors++;
-
- /* If we have space available to accept new transmit
- * requests, wake up the queueing layer. This would
- * be the case if the chipset_init() call above just
- * flushes out the tx queue and empties it.
- *
- * If instead, the tx queue is retained then the
- * netif_wake_queue() call should be placed in the
- * TX completion interrupt handler of the driver instead
- * of here.
- */
- if (!tx_full(dev))
- netif_wake_queue(dev);
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is non-reboot way to recover if something goes wrong.
- */
-static int
-net_open(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- /*
- * This is used if the interrupt line can turned off (shared).
- * See 3c503.c for an example of selecting the IRQ at config-time.
- */
- if (request_irq(dev->irq, net_interrupt, 0, cardname, dev)) {
- return -EAGAIN;
- }
- /*
- * Always allocate the DMA channel after the IRQ,
- * and clean up on failure.
- */
- if (request_dma(dev->dma, cardname)) {
- free_irq(dev->irq, dev);
- return -EAGAIN;
- }
-
- /* Reset the hardware here. Don't forget to set the station address. */
- chipset_init(dev, 1);
- outb(0x00, ioaddr);
- np->open_time = jiffies;
-
- /* We are now ready to accept transmit requeusts from
- * the queueing layer of the networking.
- */
- netif_start_queue(dev);
-
- return 0;
-}
-
-/* This will only be invoked if your driver is _not_ in XOFF state.
- * What this means is that you need not check it, and that this
- * invariant will hold if you make sure that the netif_*_queue()
- * calls are done at the proper times.
- */
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char *buf = skb->data;
-
- /* If some error occurs while trying to transmit this
- * packet, you should return '1' from this function.
- * In such a case you _may not_ do anything to the
- * SKB, it is still owned by the network queueing
- * layer when an error is returned. This means you
- * may not modify any SKB fields, you may not free
- * the SKB, etc.
- */
-
-#if TX_RING
- /* This is the most common case for modern hardware.
- * The spinlock protects this code from the TX complete
- * hardware interrupt handler. Queue flow control is
- * thus managed under this lock as well.
- */
- unsigned long flags;
- spin_lock_irqsave(&np->lock, flags);
-
- add_to_tx_ring(np, skb, length);
- dev->trans_start = jiffies;
-
- /* If we just used up the very last entry in the
- * TX ring on this device, tell the queueing
- * layer to send no more.
- */
- if (tx_full(dev))
- netif_stop_queue(dev);
-
- /* When the TX completion hw interrupt arrives, this
- * is when the transmit statistics are updated.
- */
-
- spin_unlock_irqrestore(&np->lock, flags);
-#else
- /* This is the case for older hardware which takes
- * a single transmit buffer at a time, and it is
- * just written to the device via PIO.
- *
- * No spin locking is needed since there is no TX complete
- * event. If by chance your card does have a TX complete
- * hardware IRQ then you may need to utilize np->lock here.
- */
- hardware_send_packet(ioaddr, buf, length);
- np->stats.tx_bytes += skb->len;
-
- dev->trans_start = jiffies;
-
- /* You might need to clean up and record Tx statistics here. */
- if (inw(ioaddr) == /*RU*/81)
- np->stats.tx_aborted_errors++;
- dev_kfree_skb (skb);
-#endif
-
- return NETDEV_TX_OK;
-}
-
-#if TX_RING
-/* This handles TX complete events posted by the device
- * via interrupts.
- */
-void net_tx(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int entry;
-
- /* This protects us from concurrent execution of
- * our dev->hard_start_xmit function above.
- */
- spin_lock(&np->lock);
-
- entry = np->tx_old;
- while (tx_entry_is_sent(np, entry)) {
- struct sk_buff *skb = np->skbs[entry];
-
- np->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq (skb);
-
- entry = next_tx_entry(np, entry);
- }
- np->tx_old = entry;
-
- /* If we had stopped the queue due to a "tx full"
- * condition, and space has now been made available,
- * wake up the queue.
- */
- if (netif_queue_stopped(dev) && ! tx_full(dev))
- netif_wake_queue(dev);
-
- spin_unlock(&np->lock);
-}
-#endif
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t net_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *np;
- int ioaddr, status;
- int handled = 0;
-
- ioaddr = dev->base_addr;
-
- np = netdev_priv(dev);
- status = inw(ioaddr + 0);
-
- if (status == 0)
- goto out;
- handled = 1;
-
- if (status & RX_INTR) {
- /* Got a packet(s). */
- net_rx(dev);
- }
-#if TX_RING
- if (status & TX_INTR) {
- /* Transmit complete. */
- net_tx(dev);
- np->stats.tx_packets++;
- netif_wake_queue(dev);
- }
-#endif
- if (status & COUNTERS_INTR) {
- /* Increment the appropriate 'localstats' field. */
- np->stats.tx_window_errors++;
- }
-out:
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 10;
-
- do {
- int status = inw(ioaddr);
- int pkt_len = inw(ioaddr);
-
- if (pkt_len == 0) /* Read all the frames? */
- break; /* Done for now */
-
- if (status & 0x40) { /* There was an error. */
- lp->stats.rx_errors++;
- if (status & 0x20) lp->stats.rx_frame_errors++;
- if (status & 0x10) lp->stats.rx_over_errors++;
- if (status & 0x08) lp->stats.rx_crc_errors++;
- if (status & 0x04) lp->stats.rx_fifo_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- lp->stats.rx_bytes+=pkt_len;
-
- skb = dev_alloc_skb(pkt_len);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- break;
- }
- skb->dev = dev;
-
- /* 'skb->data' points to the start of sk_buff data area. */
- memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start,
- pkt_len);
- /* or */
- insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
-
- netif_rx(skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- } while (--boguscount);
-
- return;
-}
-
-/* The inverse routine to net_open(). */
-static int
-net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- lp->open_time = 0;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx here. */
-
- disable_dma(dev->dma);
-
- /* If not IRQ or DMA jumpered, free up the line. */
- outw(0x00, ioaddr+0); /* Release the physical interrupt line. */
-
- free_irq(dev->irq, dev);
- free_dma(dev->dma);
-
- /* Update the statistics here. */
-
- return 0;
-
-}
-
-/*
- * Get the current statistics.
- * This may be called with the card open or closed.
- */
-static struct net_device_stats *net_get_stats(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
-
- /* Update the statistics from the device registers. */
- lp->stats.rx_missed_errors = inw(ioaddr+1);
- return &lp->stats;
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- * num_addrs == -1 Promiscuous mode, receive all packets
- * num_addrs == 0 Normal mode, clear multicast list
- * num_addrs > 0 Multicast mode, receive normal and MC packets,
- * and do best-effort filtering.
- */
-static void
-set_multicast_list(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- if (dev->flags&IFF_PROMISC)
- {
- /* Enable promiscuous mode */
- outw(MULTICAST|PROMISC, ioaddr);
- }
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
- {
- /* Disable promiscuous mode, use normal mode. */
- hardware_set_filter(NULL);
-
- outw(MULTICAST, ioaddr);
- }
- else if(dev->mc_count)
- {
- /* Walk the address list, and load the filter */
- hardware_set_filter(dev->mc_list);
-
- outw(MULTICAST, ioaddr);
- }
- else
- outw(0, ioaddr);
-}
-
-#ifdef MODULE
-
-static struct net_device *this_device;
-static int io = 0x300;
-static int irq;
-static int dma;
-static int mem;
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- struct net_device *dev;
- int result;
-
- if (io == 0)
- printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
- cardname);
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return -ENOMEM;
-
- /* Copy the parameters from insmod into the device structure. */
- dev->base_addr = io;
- dev->irq = irq;
- dev->dma = dma;
- dev->mem_start = mem;
- if (do_netcard_probe(dev) == 0) {
- this_device = dev;
- return 0;
- }
- free_netdev(dev);
- return -ENXIO;
-}
-
-void
-cleanup_module(void)
-{
- unregister_netdev(this_device);
- cleanup_card(this_device);
- free_netdev(this_device);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 16c91910d6c1..773c59c89691 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -69,6 +69,7 @@
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
+#include <linux/slab.h>
#include <asm/abs_addr.h>
#include <asm/iseries/mf.h>
@@ -384,7 +385,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
NULL
};
-static struct sysfs_ops veth_cnx_sysfs_ops = {
+static const struct sysfs_ops veth_cnx_sysfs_ops = {
.show = veth_cnx_attribute_show
};
@@ -441,7 +442,7 @@ static struct attribute *veth_port_default_attrs[] = {
NULL
};
-static struct sysfs_ops veth_port_sysfs_ops = {
+static const struct sysfs_ops veth_port_sysfs_ops = {
.show = veth_port_attribute_show
};
@@ -958,18 +959,17 @@ static void veth_set_multicast_list(struct net_device *dev)
write_lock_irqsave(&port->mcast_gate, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > VETH_MAX_MCAST)) {
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi = dev->mc_list;
- int i;
+ struct dev_mc_list *dmi;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
u8 *addr = dmi->dmi_addr;
u64 xaddr = 0;
@@ -978,7 +978,6 @@ static void veth_set_multicast_list(struct net_device *dev)
port->mcast_addr[port->num_mcast] = xaddr;
port->num_mcast++;
}
- dmi = dmi->next;
}
}
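
The multicast rework above follows the tree-wide move away from walking dev->mc_list and dev->mc_count by hand and toward the netdev_mc_count()/netdev_for_each_mc_addr() accessors; the same conversion appears again in ixgb_set_multi() below. A generic sketch of the idiom, where the slot limit and hardware helper are hypothetical and not part of this patch:

	static void example_load_mc_filter(struct net_device *dev)
	{
		struct dev_mc_list *dmi;

		/* Too many entries for the hardware filter: a real driver
		 * would fall back to multicast-promiscuous mode here. */
		if (netdev_mc_count(dev) > EXAMPLE_HW_MC_SLOTS)
			return;

		/* Walk the list without touching mc_list/mc_count directly. */
		netdev_for_each_mc_addr(dmi, dev)
			example_hw_add_mc(dev, dmi->dmi_addr);
	}
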
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 5257ae08b9f9..92d2e71d0c8b 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -75,19 +75,14 @@ struct ixgb_adapter;
#include "ixgb_ee.h"
#include "ixgb_ids.h"
+#define PFX "ixgb: "
+
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args)
+#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
#else
#define IXGB_DBG(args...)
#endif
-#define PFX "ixgb: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args))
-
-
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..c9fef65cb98b 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
@@ -238,8 +238,8 @@ ixgb_up(struct ixgb_adapter *adapter)
if (err) {
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
return err;
}
@@ -310,7 +310,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
ixgb_adapter_stop(hw);
if (!ixgb_init_hw(hw))
- DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
+ netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
/* restore frame size information */
IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
@@ -447,7 +447,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -456,7 +457,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -477,7 +478,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
+ netif_info(adapter, probe, adapter->netdev,
+ "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
@@ -552,14 +554,14 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
- if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
- DPRINTK(PROBE, ERR, "unsupported device id\n");
+ netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
}
/* enable flow control to be programmed */
@@ -661,8 +663,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor ring memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -675,8 +677,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -750,8 +752,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptor ring\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -765,8 +767,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptors\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1077,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_VFE;
}
- if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
@@ -1086,13 +1088,12 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
- for (i = 0, mc_ptr = netdev->mc_list;
- mc_ptr;
- i++, mc_ptr = mc_ptr->next)
- memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
- ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
}
@@ -1363,13 +1364,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
@@ -1580,7 +1581,8 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
if ((new_mtu < 68) ||
(max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
+ netif_err(adapter, probe, adapter->netdev,
+ "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
@@ -1616,7 +1618,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
return;
if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
@@ -1854,24 +1856,25 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
IXGB_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- IXGB_READ_REG(&adapter->hw, TDH),
- IXGB_READ_REG(&adapter->hw, TDT),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->status);
+ netif_err(adapter, drv, adapter->netdev,
+ "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
netif_stop_queue(netdev);
}
}
@@ -2269,7 +2272,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
struct ixgb_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2285,14 +2289,16 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/* Make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, invalid MAC address\n");
return PCI_ERS_RESULT_DISCONNECT;
}
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 21b41f42b61c..8f81efb49169 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb535084..79c35ae3718c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,22 @@
#define IXGBE_MAX_RSC_INT_RATE 162760
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ int rar;
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
+ int numa_node;
u64 rsc_count; /* stat for coalesced packets */
u64 rsc_flush; /* stats for flushed packets */
u32 restart_queue; /* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_DCB,
- RING_F_VMDQ,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
@@ -183,18 +200,21 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
int indices;
int mask;
} ____cacheline_internodealigned_in_smp;
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
@@ -277,7 +297,7 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
@@ -286,8 +306,10 @@ struct ixgbe_adapter {
u64 lsc_int;
/* RX */
- struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
@@ -323,13 +345,14 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +402,13 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ int node;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
};
enum ixbge_state_t {
@@ -426,6 +456,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
@@ -440,6 +474,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 204177d78cec..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 link_speed = 0;
+ bool link_up;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
goto out;
#endif /* CONFIG_DCB */
+ /*
+ * On 82598 having Rx FC on causes resets while doing 1G
+ * so if it's on turn it off once we know link_speed. For
+ * more details see 82598 Specification update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 538340527aa6..12fc0e7ba2ca 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -31,6 +31,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -38,6 +39,9 @@
#define IXGBE_82599_MC_TBL_SIZE 128
#define IXGBE_82599_VFT_TBL_SIZE 128
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -67,7 +71,15 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -411,6 +423,67 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
return status;
}
+ /**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
+
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
+
/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
@@ -439,16 +512,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
speed &= phy_link_speed;
/*
- * When the driver changes the link speeds that it can support,
- * it sets autotry_restart to true to indicate that we need to
- * initiate a new autotry session with the link partner. To do
- * so, we set the speed then disable and re-enable the tx laser, to
- * alert the link partner that it also needs to restart autotry on its
- * end. This is consistent with true clause 37 autoneg, which also
- * involves a loss of signal.
- */
-
- /*
* Try each speed one by one, highest priority first. We do this in
* software because 10gb fiber doesn't support speed autonegotiation.
*/
@@ -465,6 +528,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (1G->10G) */
msleep(40);
@@ -477,19 +541,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- udelay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msleep(2);
-
- hw->mac.autotry_restart = false;
- }
+ hw->mac.ops.flap_tx_laser(hw);
/*
* Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -524,6 +576,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (10G->1G) */
msleep(40);
@@ -536,19 +589,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- udelay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msleep(2);
-
- hw->mac.autotry_restart = false;
- }
+ hw->mac.ops.flap_tx_laser(hw);
/* Wait for the link partner to also set speed */
msleep(100);
@@ -889,7 +930,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = 0;
- u32 ctrl, ctrl_ext;
+ u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@@ -944,15 +985,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
msleep(50);
-
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1095,9 +1130,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 regindex;
+ u32 vlvf_index;
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt_ctl;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -1124,76 +1161,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !first_empty_slot)
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
+ vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+ goto out;
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
+ /* find the vlanid or the first empty slot */
+ first_empty_slot = 0;
+
+ for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+ if (!bits && !first_empty_slot)
+ first_empty_slot = vlvf_index;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ vlvf_index = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ goto out;
}
+ }
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- }
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
} else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- }
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
}
+ }
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ /* if bits is non-zero then some pools/VFs are still
+ * using this VLAN ID. Force the VFTA entry to on */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ bits |= (1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
}
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
out:
return 0;
@@ -1434,6 +1479,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1675,8 +1723,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
* @src_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
{
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1766,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
* @dst_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
{
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1797,7 +1845,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @vm_pool: the Virtual Machine pool to load
**/
s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
+ u8 vm_pool)
{
input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
@@ -1821,8 +1869,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2125,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- u16 soft_id,
- u8 queue)
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4, dst_ipv4;
+ u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
+ u8 fdirm = 0;
/* Get our input values */
ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2199,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2207,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ if (src_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+ if (dst_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+				     (input_masks->dst_port_mask << 16)));
+ break;
+ default:
+ /* this already would have failed above */
+ break;
+ }
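/*
 * A minimal sketch of the register layout the switch above relies on
 * (fdir_port_mask() is illustrative only, not a helper in this driver):
 * FDIRTCPM/FDIRUDPM carry the source-port mask in bits 15:0 and the
 * destination-port mask in bits 31:16, and a port of zero is treated as
 * "match anything", so the full 0xffff mask is programmed for that half.
 */
static u32 fdir_port_mask(u16 src_port, u16 dst_port,
			  u16 src_port_mask, u16 dst_port_mask)
{
	u32 m;

	m  = src_port ? src_port_mask : 0xffff;			/* bits 15:0  */
	m |= (u32)(dst_port ? dst_port_mask : 0xffff) << 16;	/* bits 31:16 */

	return m;
}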
+
+ /* Program the last mask register, FDIRM */
+ if (input_masks->vlan_id_mask || !vlan_id)
+ /* Mask both VLAN and VLANP - bits 0 and 1 */
+ fdirm |= 0x3;
+
+ if (input_masks->data_mask || !flex_bytes)
+ /* Flex bytes need masking, so mask the whole thing - bit 4 */
+ fdirm |= 0x10;
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ fdirm |= 0x24;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
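/*
 * Worked example of the FDIRM value built above: a filter submitted with
 * no VLAN tag and no flex data ends up with
 *
 *	fdirm = 0x3 | 0x10 | 0x24 = 0x37
 *
 * i.e. VLAN id/priority (bits 0-1), the flex bytes (bit 4) and the VM
 * pool plus destination IPv6 fields (bits 2 and 5) are all excluded from
 * the match, leaving the IPv4 addresses, the ports and the L4 type to
 * decide whether the filter hits.
 */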
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
@@ -2655,4 +2775,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_82599,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 688b8ca5da32..eb49020903c1 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/list.h>
#include <linux/netdevice.h>
#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @uc_list: the list of new addresses
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list)
+ struct net_device *netdev)
{
u32 i;
u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
}
/* Add the new addresses */
- list_for_each_entry(ha, uc_list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
hw_dbg(hw, " Adding the secondary addresses:\n");
ixgbe_add_uc_addr(hw, ha->addr, 0);
}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 27f3214bed2e..13606d4809c9 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list);
+ struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index a1562287342f..9aea4f04bbd2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 64a9fa15c059..5caafd4afbc3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f30263898ebc..f0e9279d4669 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index ebbe53c352a7..cc728fa092e2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index ec8a252636d3..4f7a26ab411e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 9e5e2827e4af..0f3f791e1e1d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 3c7a79a7d7c6..dd4883f642be 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
- adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
adapter->dcb_set_bitmap |= BIT_RESETLINK;
}
}
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (!adapter->dcb_set_bitmap)
return DCB_NO_HW_CHG;
+ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices);
+
+ if (ret)
+ return DCB_NO_HW_CHG;
+
/*
* Only take down the adapter if the configuration change
* requires a reset.
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
- ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
- if (ret) {
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return DCB_NO_HW_CHG;
- }
-
if (adapter->dcb_cfg.pfc_mode_enable) {
if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
(adapter->hw.fc.current_mode != ixgbe_fc_pfc))
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3b9f65..8f461d5cee77 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
@@ -441,10 +442,8 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
} else {
- netif_tx_stop_all_queues(netdev);
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
- netif_tx_start_all_queues(netdev);
}
return 0;
}
@@ -834,8 +833,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = adapter->tx_ring;
- struct ixgbe_ring *rx_ring = adapter->rx_ring;
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +866,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
/* nothing to do */
return 0;
}
@@ -878,25 +877,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
- goto err_setup;
+ goto clear_reset;
}
- temp_tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
if (!temp_tx_ring) {
err = -ENOMEM;
- goto err_setup;
+ goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
err = ixgbe_setup_tx_resources(adapter,
&temp_tx_ring[i]);
@@ -904,28 +902,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (i) {
i--;
ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ &temp_tx_ring[i]);
}
- goto err_setup;
+ goto clear_reset;
}
}
need_update = true;
}
- temp_rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if ((!temp_rx_ring) && (need_update)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
- kfree(temp_tx_ring);
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
err = -ENOMEM;
goto err_setup;
}
if (new_rx_count != adapter->rx_ring_count) {
- memcpy(temp_rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter,
&temp_rx_ring[i]);
@@ -947,22 +941,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = temp_tx_ring;
- temp_tx_ring = NULL;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter,
+ adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = temp_rx_ring;
- temp_rx_ring = NULL;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter,
+ adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
ixgbe_up(adapter);
}
+
+ vfree(temp_rx_ring);
err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
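/*
 * The reworked unwind above leans on vfree() being a no-op for a NULL
 * pointer: the success path frees temp_rx_ring and then falls into
 * err_setup (vfree(temp_tx_ring)) and clear_reset (drop the
 * __IXGBE_RESETTING bit), while the early "!netif_running()" path jumps
 * straight to clear_reset because no temporary rings exist at that point.
 */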
@@ -974,6 +978,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_NTUPLE_FILTERS:
+ return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
default:
return -EOPNOTSUPP;
}
@@ -1007,13 +1014,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
@@ -1627,7 +1634,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0].reg_idx;
+ int j = adapter->rx_ring[0]->reg_idx;
u32 k;
for (k = 0; k < 10; k++) {
if (IXGBE_READ_REG(&adapter->hw,
@@ -1847,6 +1854,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].clear_to_send) {
+ netdev_warn(netdev, "%s",
+ "offline diagnostic is not "
+ "supported when VFs are "
+ "present\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__IXGBE_TESTING,
+ &adapter->state);
+ goto skip_ol_tests;
+ }
+ }
+ }
+
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
@@ -1867,11 +1894,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+ "mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
ixgbe_reset(adapter);
DPRINTK(HW, INFO, "loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+skip_loopback:
ixgbe_reset(adapter);
clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -1891,6 +1929,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
clear_bit(__IXGBE_TESTING, &adapter->state);
}
+skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2000,7 +2039,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2053,7 +2092,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
@@ -2134,23 +2173,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
ethtool_op_set_flags(netdev, data);
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
- return 0;
-
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
(!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+ (!(data & ETH_FLAG_NTUPLE))) {
+ /* turn off Flow Director perfect, set hash and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+ (data & ETH_FLAG_NTUPLE)) {
+ /* turn off Flow Director hash, enable perfect and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ } else {
+ /* no state change */
+ }
+
+ if (need_reset) {
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
+
return 0;
+}
+
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+ struct ixgbe_atr_input input_struct;
+ struct ixgbe_atr_input_masks input_masks;
+ int target_queue;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Tx queues.
+ */
+ if ((fs.action >= adapter->num_tx_queues) ||
+ (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ return -EINVAL;
+
+ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+ input_masks.vlan_id_mask = fs.vlan_tag_mask;
+ /* only use the lowest 2 bytes for flex bytes */
+ input_masks.data_mask = (fs.data_mask & 0xffff);
+ switch (fs.flow_type) {
+ case TCP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ break;
+ case UDP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ break;
+ default:
+		return -EINVAL;
+ }
+
+ /* Mask bits from the inputs based on user-supplied mask */
+ ixgbe_atr_set_src_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+ ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+ /* 82599 expects these to be byte-swapped for perfect filtering */
+ ixgbe_atr_set_src_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+ ixgbe_atr_set_dst_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+ /* VLAN and Flex bytes are either completely masked or not */
+ if (!fs.vlan_tag_mask)
+ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+ if (!input_masks.data_mask)
+ /* make sure we only use the first 2 bytes of user data */
+ ixgbe_atr_set_flex_byte_82599(&input_struct,
+ (fs.data & 0xffff));
+
+ /* determine if we need to drop or route the packet */
+ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ target_queue = MAX_RX_QUEUES - 1;
+ else
+ target_queue = fs.action;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+ &input_masks, 0, target_queue);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
}
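/*
 * Rough usage sketch for the new ndo above (illustrative only; the helper
 * below is not part of the driver, and field names come from the
 * ethtool_rx_ntuple_flow_spec definition in <linux/ethtool.h>).  A spec
 * that steers IPv4 TCP traffic for 192.168.1.1:80 to Rx queue 2, with all
 * masks left at zero, would be filled in roughly like this:
 */
static void example_fill_tcp4_spec(struct ethtool_rx_ntuple_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4dst = htonl(0xc0a80101);	/* 192.168.1.1 */
	fs->h_u.tcp_ip4_spec.pdst = htons(80);			/* dst port 80 */
	fs->action = 2;						/* Rx queue 2  */
	/*
	 * A zero source address/port is widened to a full wildcard by the
	 * code above, and fs->action = ETHTOOL_RXNTUPLE_ACTION_DROP would
	 * instead be mapped to MAX_RX_QUEUES - 1, the Flow Director drop
	 * queue programmed earlier in ixgbe_init_fdir_perfect_82599().
	 */
}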
static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2188,6 +2328,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index da32a108a7b4..6493049b663d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -31,6 +31,7 @@
#include "ixgbe_dcb_82599.h"
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
+#include <linux/gfp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
@@ -202,6 +203,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
while (len) {
+ /* max number of buffers allowed in one DDP context */
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ netif_err(adapter, drv, adapter->netdev,
+ "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough descriptors\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
/* get the offset of length of current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min((bufflen - thisoff), len);
@@ -227,20 +237,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
len -= thislen;
addr += thislen;
j++;
- /* max number of buffers allowed in one DDP context */
- if (j > IXGBE_BUFFCNT_MAX) {
- DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
- "not enough descriptors\n",
- xid, i, j, dmacount, (u64)addr);
- goto out_noddp_free;
- }
}
}
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
- fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+ fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
fcbuff |= (IXGBE_FCBUFF_VALID);
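/*
 * Worked example of the per-buffer split done in the loop above, assuming
 * the 4 KB buffer size implied by IXGBE_FCBUFF_4KB: a scatterlist element
 * with addr = 0x10001800 and len = 10240 is carved up as
 *
 *	descriptor 0: thisoff = 0x800, thislen = 4096 - 0x800 = 2048
 *	descriptor 1: thisoff = 0,     thislen = 4096
 *	descriptor 2: thisoff = 0,     thislen = 4096
 *
 * so lastsize = 0 + 4096, and the relocated IXGBE_BUFFCNT_MAX check now
 * rejects the request before j could overflow the DDP context.
 */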
@@ -520,25 +523,40 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
/* Enable L2 eth type filter for FCoE */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+ /* Enable L2 eth type filter for FIP */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+ (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
if (adapter->ring_feature[RING_F_FCOE].indices) {
/* Use multiple rx queues for FCoE by redirection table */
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+ fcoe_i = f->mask;
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
}
+ /* send FIP frames to the first FCoE queue */
+ fcoe_i = f->mask;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
IXGBE_FCRXCTRL_FCOELLI |
@@ -614,9 +632,9 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_FSO;
netdev->vlan_features |= NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
- netdev_features_change(netdev);
ixgbe_init_interrupt_scheme(adapter);
+ netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
@@ -660,11 +678,11 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
netdev->vlan_features &= ~NETIF_F_FSO;
netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = 0;
- netdev_features_change(netdev);
ixgbe_cleanup_fcoe(adapter);
-
ixgbe_init_interrupt_scheme(adapter);
+ netdev_features_change(netdev);
+
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
rc = 0;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index de8ff53187da..abf4b2b3f252 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd64387563f0..6c00ee493a3b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
+#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
@@ -45,14 +46,15 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -67,7 +69,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +126,13 @@ static struct notifier_block dca_notifier = {
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -131,6 +140,41 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ if (adapter->vfinfo)
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -262,10 +306,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
int reg_idx = tx_ring->reg_idx;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
tc = reg_idx >> 2;
txoff = IXGBE_TFCS_TXOFF0;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
tc = 0;
txoff = IXGBE_TFCS_TXOFF;
if (dcb_i == 8) {
@@ -284,6 +330,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
tc += (reg_idx - 96) >> 4;
}
}
+ break;
+ default:
+ tc = 0;
}
txoff <<= tc;
}
@@ -446,7 +495,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -474,7 +523,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
@@ -508,12 +557,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
@@ -770,6 +819,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
return skb;
}
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+};
+
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
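/*
 * The cast above stashes driver-private state in the skb control block;
 * that is safe as long as struct ixgbe_rsc_cb (a single dma_addr_t here)
 * fits inside skb->cb, which is 48 bytes.  A compile-time guard along
 * these lines would express the assumption (the BUILD_BUG_ON is only a
 * sketch, it is not added by this patch):
 *
 *	BUILD_BUG_ON(sizeof(struct ixgbe_rsc_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */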
+
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
@@ -801,6 +856,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
(*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
@@ -818,9 +874,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (!(staterr & IXGBE_RXD_STAT_EOP)) &&
+ (!(skb->prev)))
+ /*
+ * When HWRSC is enabled, delay unmapping
+ * of the first packet. It carries the
+ * header information, HW may still
+ * access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+ else
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -868,6 +936,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ if (IXGBE_RSC_CB(skb)->dma) {
+ pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ }
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
else
@@ -979,12 +1053,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_bit(...) */
+ /* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
@@ -994,7 +1068,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
@@ -1020,7 +1094,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1129,7 +1208,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1144,7 +1223,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1249,6 +1328,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
@@ -1263,7 +1345,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
@@ -1322,7 +1404,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1350,7 +1432,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1380,7 +1462,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1389,7 +1471,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1420,7 +1502,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1461,7 +1543,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
@@ -1477,7 +1559,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
@@ -1488,7 +1570,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
@@ -1521,7 +1603,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1706,8 +1788,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -1763,6 +1845,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1771,6 +1855,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
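/*
 * Worked example for the EITRSEL write above: with 40 VFs the value is
 * (1 << (40 - 32)) - 1 = 0xff, i.e. one bit for each VF beyond the first
 * bank of 32.  With 32 or fewer VFs the register is left untouched here,
 * and the matching ixgbe_irq_disable() change clears it again only in the
 * more-than-32-VFs case.
 */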
/**
@@ -1812,10 +1901,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -1900,6 +1989,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1945,7 +2036,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1955,8 +2046,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
@@ -1984,18 +2075,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
	/* re-enable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2054,12 +2159,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2085,7 +2194,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2140,7 +2249,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2152,7 +2263,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2179,7 +2292,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2189,7 +2302,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2238,6 +2351,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
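/*
 * Worked example for the pool-enable writes above: the PF claims the pool
 * immediately after the VFs, so with 40 VFs vf_shift = 40 % 32 = 8 and
 * reg_offset = 40 / 32 = 1, and the single bit set is bit 8 of VFRE(1)
 * and VFTE(1).  Every other pool bit starts out cleared by the four
 * writes of zero just before.
 */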
+
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
@@ -2269,6 +2406,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2307,15 +2458,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2326,7 +2479,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2356,7 +2509,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2409,7 +2562,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -2441,14 +2594,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+ hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2517,7 +2672,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
@@ -2534,7 +2689,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2574,7 +2729,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2584,8 +2739,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2668,7 +2823,7 @@ link_cfg_out:
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2682,8 +2837,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2697,6 +2852,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
u32 dmatxctl;
u32 gpie;
+ u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
@@ -2709,6 +2865,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2765,7 +2925,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
@@ -2779,14 +2939,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2810,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
+ /* enable the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.enable_tx_laser(hw);
+
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -2860,7 +3036,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -2870,6 +3046,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
return 0;
}
@@ -2879,6 +3061,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
ixgbe_down(adapter);
+ /*
+ * If SR-IOV enabled then wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
ixgbe_up(adapter);
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
@@ -2918,7 +3108,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
@@ -2950,6 +3141,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
+ if (IXGBE_RSC_CB(this)->dma) {
+ pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ IXGBE_RSC_CB(this)->dma = 0;
+ }
skb = skb->prev;
dev_kfree_skb(this);
} while (skb);
@@ -3024,7 +3221,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -3036,7 +3233,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3050,6 +3247,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* power down the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.disable_tx_laser(hw);
+
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+ }
+
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3076,7 +3290,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3089,6 +3303,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
+ /* clear n-tuple filters that are cached */
+ ethtool_ntuple_flush(netdev);
+
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
@@ -3116,13 +3333,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
@@ -3286,6 +3503,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependant
* @adapter: board private structure to initialize
@@ -3299,6 +3529,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
@@ -3388,9 +3627,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
@@ -3417,8 +3656,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3436,18 +3675,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
@@ -3460,12 +3699,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
@@ -3498,9 +3737,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
@@ -3528,8 +3767,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
@@ -3560,8 +3799,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
@@ -3570,6 +3809,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
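
Editorial aside: the num_vfs * 2 offset above is not explained in the patch; it presumably reflects the 64-pool VT mode selected earlier in this change (IXGBE_GPIE_VTMODE_64), where the 128 hardware queues appear to be split two per pool, so the PF's first ring register sits right after the VF ranges. A standalone sketch of that arithmetic, illustrative only and not driver code:

#include <stdio.h>

/* Assumed mapping: two hardware queues per pool, VFs occupying the low pools. */
static unsigned int pf_base_reg_idx(unsigned int num_vfs)
{
        return num_vfs * 2;
}

int main(void)
{
        printf("with 8 VFs the PF rings would start at register index %u\n",
               pf_base_reg_idx(8));
        printf("with 63 VFs the PF rings would start at register index %u\n",
               pf_base_reg_idx(63));
        return 0;
}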
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -3583,8 +3840,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
@@ -3614,33 +3874,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
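
The ring allocation above introduces a pattern worth noting on its own: try to allocate on the adapter's NUMA node first, then fall back to any node rather than fail. A minimal kernel-style sketch of the same idea (the helper name is ours, not the driver's):

#include <linux/slab.h>

/* Prefer memory local to 'node', but fall back to any node on failure,
 * mirroring the tx_ring/rx_ring allocation above. */
static void *alloc_pref_node(size_t size, int node)
{
        void *p = kzalloc_node(size, GFP_KERNEL, node);

        if (!p)
                p = kzalloc(size, GFP_KERNEL);
        return p;
}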
@@ -3695,6 +3985,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
@@ -3736,7 +4029,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -3863,10 +4160,16 @@ err_set_interrupt:
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
@@ -3937,6 +4240,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -3964,10 +4268,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (dev->features & NETIF_F_NTUPLE) {
+ /* Flow Director perfect filter enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ } else {
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ }
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4036,6 +4348,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4055,7 +4370,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4097,7 +4414,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@ -4121,7 +4438,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4167,7 +4486,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@ -4210,8 +4529,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
@@ -4247,8 +4566,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
@@ -4373,6 +4692,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4520,8 +4844,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
@@ -4529,11 +4853,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4717,6 +5041,7 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
autoneg = hw->phy.autoneg_advertised;
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ hw->mac.autotry_restart = false;
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4772,7 +5097,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
@@ -4781,6 +5106,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
netif_tx_start_all_queues(adapter->netdev);
}
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
@@ -4792,13 +5119,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
+ u32 link_speed;
+ bool link_up;
int i;
struct ixgbe_ring *tx_ring;
int some_tx_pending = 0;
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_lock(&ixgbe_watchdog_lock);
+
+ link_up = adapter->link_up;
+ link_speed = adapter->link_speed;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4869,7 +5199,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
@@ -4887,7 +5217,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
ixgbe_update_stats(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_unlock(&ixgbe_watchdog_lock);
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -4918,7 +5248,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5157,19 +5487,19 @@ dma_error:
tx_buffer_info->dma = 0;
tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
}
- return count;
+ return 0;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5319,19 +5649,29 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
return txq;
+ }
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
- (skb->protocol == htons(ETH_P_FCOE))) {
+ ((skb->protocol == htons(ETH_P_FCOE)) ||
+ (skb->protocol == htons(ETH_P_FIP)))) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
return txq;
}
#endif
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ if (skb->priority == TC_PRIO_CONTROL)
+ txq = adapter->ring_feature[RING_F_DCB].indices-1;
+ else
+ txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
+ >> 13;
+ return txq;
+ }
return skb_tx_hash(dev, skb);
}
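
A small aside on the wrap-around loop added at the top of ixgbe_select_queue: for the common case where the CPU id is only a small multiple of the queue count it behaves like a modulo without the divide. Standalone illustration, not driver code:

#include <stdio.h>

int main(void)
{
        unsigned int txq = 13, nq = 8;

        /* same reduction the driver performs with its while loop */
        while (txq >= nq)
                txq -= nq;

        printf("txq reduced to %u (13 %% 8 is %u)\n", txq, 13u % 8u);
        return 0;
}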
@@ -5358,30 +5698,32 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= ((skb->queue_mapping & 0x7) << 13);
- tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else {
- skb->queue_mapping =
- adapter->ring_feature[RING_F_DCB].indices-1;
- }
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
- (skb->protocol == htons(ETH_P_FCOE))) {
- tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
- tx_flags |= ((adapter->fcoe.up << 13)
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
-#endif
+ /* for FCoE with DCB, we force the priority to what
+ * was specified by the switch */
+ if ((skb->protocol == htons(ETH_P_FCOE)) ||
+ (skb->protocol == htons(ETH_P_FIP))) {
+ tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= ((adapter->fcoe.up << 13)
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ }
#endif
+ /* flag for FCoE offloads */
+ if (skb->protocol == htons(ETH_P_FCOE))
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
+#endif
+
/* four things can cause us to need a context descriptor */
if (skb_is_gso(skb) ||
(skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -5474,7 +5816,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
@@ -5566,6 +5909,10 @@ static void ixgbe_netpoll(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -5607,6 +5954,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
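
For readers new to SR-IOV, the probe helper above amounts to three steps: clamp the requested VF count, ask the PCI core to spawn the virtual functions, and allocate per-VF bookkeeping, rolling back if that allocation fails. A stripped-down sketch with hypothetical names (my_enable_sriov, my_vf_info), not the driver's actual code:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct my_vf_info {
        u8 mac[6];
        bool clear_to_send;
};

/* Enable up to 'want' VFs on 'pdev'; return the count enabled or -errno. */
static int my_enable_sriov(struct pci_dev *pdev, unsigned int want,
                           struct my_vf_info **vfinfo)
{
        unsigned int nr = min_t(unsigned int, want, 63);
        int err = pci_enable_sriov(pdev, nr);

        if (err)
                return err;

        *vfinfo = kcalloc(nr, sizeof(**vfinfo), GFP_KERNEL);
        if (!*vfinfo) {
                pci_disable_sriov(pdev);        /* roll back on failure */
                return -ENOMEM;
        }
        return nr;
}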
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -5627,6 +6029,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -5665,7 +6068,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
pci_save_state(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
+ if (ii->mac == ixgbe_mac_82598EB)
+ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+ else
+ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+
+ indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
+#ifdef IXGBE_FCOE
+ indices += min_t(unsigned int, num_possible_cpus(),
+ IXGBE_MAX_FCOE_INDICES);
+#endif
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -5746,6 +6159,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* Make it possible the adapter to be woken up via WOL */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
/*
* If there is a fan on this device and it has failed log the
* failure.
@@ -5781,6 +6198,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -5801,6 +6220,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -5839,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* power down the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.disable_tx_laser(hw);
+
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &ixgbe_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -5854,9 +6280,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
- /* Enable ACPI wakeup in GRC */
- IXGBE_WRITE_REG(hw, IXGBE_GRC,
- (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
break;
default:
adapter->wol = 0;
@@ -5927,6 +6350,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -5939,6 +6369,8 @@ err_register:
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
@@ -6007,6 +6439,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..d75f9148eb1f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if an ACK was detected or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if a reset was detected or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if a message arrived, read it; otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
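
To make the posted-write path concrete: a PF-side caller passes an array of 32-bit words plus a VF index, and gets 0 back only once the VF has ACKed within the configured timeout. A hypothetical one-word helper (the name is ours; it assumes ixgbe_mbx.h and the kernel integer types):

/* Send a single control word to VF 'vf' and wait for its ack. */
static s32 pf_nudge_vf(struct ixgbe_hw *hw, u16 vf)
{
        u32 msg = IXGBE_PF_CONTROL_MSG;

        return ixgbe_write_posted_mbx(hw, &msg, 1, vf);
}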
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has requested a reset or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
+
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..be7ab3309ab7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the original value OR'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
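
The message word layout implied by the defines above: the low 16 bits carry the command, bits 23:16 carry per-command extra info, and the top bits carry the ACK/NACK/CTS flags. Two illustrative decode helpers (names are ours, mirroring the masking done in ixgbe_rcv_msg_from_vf):

/* Pull the command and the extra-info field out of the first mailbox word. */
static inline u32 vt_msg_cmd(u32 word0)
{
        return word0 & 0xFFFF;
}

static inline u32 vt_msg_info(u32 word0)
{
        return (word0 & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
}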
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 9ecad17522c3..1c1efd386956 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9b700f5bf1ed..9cf5f3b4cc5d 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..d4cd20f30199
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ int i;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multicast addresses assigned
+ * to this VF so it can be restored when the PF multicast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+}
+
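
The bit arithmetic above spreads each stored multicast hash across 128 32-bit MTA registers: bits 11:5 pick the register and bits 4:0 pick the bit inside it. A standalone userspace sketch of the same mapping (the sample hash value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hash = 0x0a53;                 /* arbitrary example value */
        uint32_t reg  = (hash >> 5) & 0x7f;     /* MTA register index, 0..127 */
        uint32_t bit  = hash & 0x1f;            /* bit within that register */

        printf("MTA[%u] |= 1 << %u\n", reg, bit);
        return 0;
}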
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+ u32 ctrl;
+
+ /* Check if global VLAN already set, if not set it */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_AUPE |
+ IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE |
+ IXGBE_VMOLR_BAM);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf);
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ if (adapter->vfinfo[vf].rar > 0) {
+ adapter->hw.mac.ops.clear_rar(&adapter->hw,
+ adapter->vfinfo[vf].rar);
+ adapter->vfinfo[vf].rar = -1;
+ }
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+ vf, IXGBE_RAH_AV);
+ if (adapter->vfinfo[vf].rar < 0) {
+ DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+ return -1;
+ }
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+ return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = (event_mask & 0x3f);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+ "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vfn,
+ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ /*
+ * Store away the VF "permanent" MAC address; the VF will ask
+ * for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+ }
+
+ return 0;
+}
+
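
The event_mask packing used here (and by the ixgbe_vf_configuration(pdev, (i | 0x10000000)) calls in the probe path) keeps the VF index in the low six bits and an enable flag in bit 28. A tiny decoding sketch, illustrative only:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int event_mask = 5 | 0x10000000;        /* "enable VF 5" */
        unsigned int vfn = event_mask & 0x3f;            /* low 6 bits */
        bool enable = (event_mask & 0x10000000U) != 0;   /* bit 28 */

        printf("VF %u, enable=%d\n", vfn, enable);
        return 0;
}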
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ printk(KERN_ERR "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
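+ /* the low word of msgbuf[0] selects the operation; the upper bits carry flags and message-specific info */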
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac))
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ else
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
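+ /* long packet enable (jumbo frame) requests are not handled here; just warn */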
+ WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ break;
+ default:
+ DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..51d1106c45a1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 84650c6ebe03..534affcc38ca 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
-#include <linux/list.h>
+#include <linux/netdevice.h>
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTPCS 0x0CD00
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1285,6 +1298,7 @@
#define IXGBE_ETQF_FILTER_BCN 1
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -1295,6 +1309,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1843,6 +1858,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
/* Little Endian defines */
#ifndef __le32
#define __le32 u32
@@ -2109,6 +2130,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
+struct ixgbe_atr_input_masks {
+ u32 src_ip_mask;
+ u32 dst_ip_mask;
+ u16 src_port_mask;
+ u16 dst_port_mask;
+ u16 vlan_id_mask;
+ u16 data_mask;
+};
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -2368,6 +2398,9 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
/* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2385,7 +2418,7 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2496,37 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
};
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
struct ixgbe_hw {
u8 __iomem *hw_addr;
void *back;
@@ -2472,6 +2536,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2486,6 +2551,7 @@ struct ixgbe_info {
struct ixgbe_mac_operations *mac_ops;
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 000000000000..dd4e0d27e8cc
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 000000000000..c44fdb05447a
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..4680b069b84f
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,731 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+};
+
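+/* for each stat, record its size and the offsets of the live counter,
+ * its base value and the value saved across the last reset */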
+#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b), \
+ offsetof(struct ixgbevf_adapter, r)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
+ stats.saved_reset_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
+ stats.saved_reset_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
+ stats.saved_reset_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
+ stats.saved_reset_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
+ stats.saved_reset_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
+ zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
+ zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
+ zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (data)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ } else {
+ ixgbevf_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ netif_tx_start_all_queues(netdev);
+ }
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
+ bool need_tx_update = false;
+ bool need_rx_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
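+ /* serialize against any other reset or ring resize in progress */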
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter,
+ &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ kfree(tx_ring);
+ goto err_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+ need_tx_update = true;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ /* if new tx rings were built they still need to be swapped in */
+ if (need_tx_update)
+ goto err_rx_setup;
+ goto err_setup;
+ }
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter,
+ &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ goto err_rx_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+ need_rx_update = true;
+ }
+
+err_rx_setup:
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_tx_update || need_rx_update) {
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+ }
+
+ /* tx */
+ if (need_tx_update) {
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ tx_ring = NULL;
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (need_rx_update) {
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ rx_ring = NULL;
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+ /* success! */
+ err = 0;
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+err_setup:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
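+ /* report each counter relative to its base value, adding back the
+ * count that was accumulated before the last reset */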
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = (char *)adapter +
+ ixgbe_gstrings_stats[i].saved_reset_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
+
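+/* write each test pattern (masked by W) to register R, read it back and
+ * compare under mask M, restoring the original register value afterwards */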
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* the register test (data[0]) is only run offline; report it as passed */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_rx_csum = ixgbevf_get_rx_csum,
+ .set_rx_csum = ixgbevf_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbevf_set_tso,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..f7015efbff05
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+ unsigned int count; /* amount of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u16 head;
+ u16 tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+
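+/* number of free descriptors in a ring, accounting for index wrap-around */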
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+ struct vlan_group *vlgrp;
+#endif
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixbgevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..0cd6202dfacc
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3600 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ */
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
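As a side note on the IVAR arithmetic above: each 32-bit VTIVAR register carries four 8-bit allocation entries, one per (even/odd queue, Rx/Tx cause) pair. The following stand-alone sketch (ordinary user-space C, not driver code; the helper name is made up for illustration) prints the byte lanes the index expression selects:

#include <stdio.h>

/* Mirror of the index math in ixgbevf_set_ivar(): byte lane within the
 * 32-bit VTIVAR register that holds queue >> 1. */
static unsigned int ivar_bit_index(unsigned int queue, unsigned int direction)
{
	/* direction: 0 = Rx cause, 1 = Tx cause */
	return (16 * (queue & 1)) + (8 * direction);
}

int main(void)
{
	unsigned int queue;

	for (queue = 0; queue < 4; queue++)
		printf("queue %u -> VTIVAR(%u): Rx bits %2u-%2u, Tx bits %2u-%2u\n",
		       queue, queue >> 1,
		       ivar_bit_index(queue, 0), ivar_bit_index(queue, 0) + 7,
		       ivar_bit_index(queue, 1), ivar_bit_index(queue, 1) + 7);
	return 0;
}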
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ unsigned int eop)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
+ /* Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of eop */
+ head = readl(hw->hw_addr + tx_ring->head);
+ tail = readl(hw->hw_addr + tx_ring->tail);
+ adapter->detect_tx_hung = false;
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+ /* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ printk(KERN_ERR "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+}
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
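A quick worked example of the macros above (a stand-alone sketch, not driver code): TXD_USE_COUNT() is simply a ceiling division by the 16 KB-per-descriptor limit, so DESC_NEEDED reserves enough slots for a maximally fragmented skb plus one context descriptor.

#include <stdio.h>

#define MAX_TXD_PWR		14
#define MAX_DATA_PER_TXD	(1 << MAX_TXD_PWR)	/* 16384 bytes */
#define TXD_USE_COUNT(S)	(((S) >> MAX_TXD_PWR) + \
				 (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	unsigned int sizes[] = { 64, 16384, 16385, 60 * 1024 };
	unsigned int i;

	/* ceil(size / 16384) data descriptors per contiguous chunk */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6u bytes -> %u descriptor(s)\n",
		       sizes[i], TXD_USE_COUNT(sizes[i]));
	return 0;
}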
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ if (adapter->detect_tx_hung) {
+ if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ printk(KERN_INFO
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbevf_tx_timeout(adapter->netdev);
+ }
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+
+ return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @ring: rx descriptor ring (for a specific queue) the packet was received on
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ int ret;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_gro_receive(&q_vector->napi,
+ adapter->vlgrp,
+ tag, skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ ret = netif_rx(skb);
+ }
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed.
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
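The page_offset ^= (PAGE_SIZE / 2) line above is the half-page reuse trick: in packet-split mode one page backs two receive buffers, and the XOR flips between the two halves of the page on successive refills. A minimal user-space sketch (PAGE_SIZE hard-coded to 4096 here purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumed page size for the example */

int main(void)
{
	unsigned int page_offset = 0;
	int refill;

	for (refill = 0; refill < 4; refill++) {
		printf("refill %d maps bytes %4u-%4u of the page\n",
		       refill, page_offset, page_offset + PAGE_SIZE / 2 - 1);
		page_offset ^= PAGE_SIZE / 2;	/* flip to the other half */
	}
	return 0;
}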
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around an issue where some types of VM-to-VM
+ * loopback packets do not get split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb->len - skb->data_len;
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ adapter->netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_set_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ /* simple throttle rate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
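A worked example of the rate estimate above (stand-alone sketch; the byte count is an illustrative assumption, not a driver default): at 20000 ints/s each timeslice is 50 us, so 6000 bytes in one slice is 120 bytes/us, i.e. roughly 120 MB/s, which the table in the comment places in the bulk range.

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 20000;		/* current rate in ints/s */
	unsigned int bytes = 6000;		/* bytes seen last interval */
	unsigned int timepassed_us = 1000000 / eitr;	/* 50 us slice */
	unsigned long long bytes_perint = bytes / timepassed_us;

	/* 6000 / 50 = 120 bytes/us, i.e. about 120 MB/s of traffic */
	printf("timeslice %u us, %llu bytes/us\n", timepassed_us, bytes_perint);
	return 0;
}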
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value to be written in *register* format, not ints/s
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ u32 itr_reg;
+
+ /* save the algorithm value here, not the smoothed one */
+ q_vector->eitr = new_itr;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+ ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ }
+
+ return;
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 msg;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
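To illustrate the constrained-vector branch above (a stand-alone sketch with made-up counts): DIV_ROUND_UP spreads the remaining rings over the remaining vectors, so with five Rx rings and two vectors the first vector takes three rings and the second takes two.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 2, rxr_remaining = 5, rxr_idx = 0;
	int i, j, rqpv;

	for (i = 0; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		printf("vector %d gets %d rx ring(s):", i, rqpv);
		for (j = 0; j < rqpv; j++, rxr_idx++, rxr_remaining--)
			printf(" %d", rxr_idx);
		printf("\n");
	}
	return 0;
}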
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[--vector].vector,
+ &(adapter->q_vector[i]));
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
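For reference, the BSIZEPKT field written above is programmed in 1 KB units; the shift value of 10 used in this small user-space sketch is an assumption for illustration, not taken from the driver headers.

#include <stdio.h>

#define SRRCTL_BSIZEPKT_SHIFT	10	/* assumed: field counts 1 KB units */

int main(void)
{
	unsigned int sizes[] = { 2048, 3072, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("buffer %5u bytes -> BSIZEPKT field %u\n",
		       sizes[i], sizes[i] >> SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}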
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j;
+ u32 ctrl;
+
+ adapter->vlgrp = grp;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *v_netdev;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable(adapter, true, true);
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq)
+{
+ struct dev_mc_list *mc_ptr;
+ u8 *addr = *mc_addr_ptr;
+ *vmdq = 0;
+
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ /* reprogram multicast list */
+ addr_count = netdev_mc_count(netdev);
+ if (addr_count)
+ addr_list = netdev->mc_list->dmi_addr;
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
+{
+ /* Only save pre-reset stats if there are some */
+ if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+ adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
+ adapter->stats.base_vfgprc;
+ adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
+ adapter->stats.base_vfgptc;
+ adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
+ adapter->stats.base_vfgorc;
+ adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
+ adapter->stats.base_vfgotc;
+ adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+ }
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ ixgbevf_save_reset_stats(adapter);
+ ixgbevf_init_last_counter_stats(adapter);
+
+ /* bring the link up in the watchdog; this could race with our first
+ * link-up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+ /* disable receives */
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ WARN_ON(in_interrupt());
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * Check if PF is up before re-init. If not then skip until
+ * later when the PF is up and ready to service requests from
+ * the VF via mailbox. If the VF is up and running then the
+ * watchdog task will continue to schedule reset tasks until
+ * the PF is up and running.
+ */
+ if (!hw->mac.ops.reset_hw(hw)) {
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+ }
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is the minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS and the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
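The retry loop above relies on the old pci_enable_msix() return convention: 0 on success, a negative errno on hard failure, or a positive count of vectors actually available. A small user-space sketch of that policy, with a stub standing in for the PCI call (the stub and its limit are made up for illustration):

#include <stdio.h>

/* Stub standing in for pci_enable_msix(): pretend only 3 vectors exist. */
static int fake_enable_msix(int requested)
{
	int available = 3;

	return requested <= available ? 0 : available;
}

int main(void)
{
	int vectors = 8, vector_threshold = 3, err;

	while (vectors >= vector_threshold) {
		err = fake_enable_msix(vectors);
		if (!err)
			break;		/* got everything we asked for */
		else if (err < 0)
			vectors = 0;	/* hard failure, give up */
		else
			vectors = err;	/* retry with what is available */
	}

	if (vectors < vector_threshold)
		printf("not enough vectors, falling back\n");
	else
		printf("acquired %d vector(s)\n", vectors);
	return 0;
}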
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the largest set of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+	 * than CPUs. So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ random_ether_addr(hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set defaults for eitr in MegaBytes */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
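+/*
+ * The VF hardware counters are narrower than the 64-bit software
+ * counters, so these macros detect a hardware wrap (current reading
+ * below the last one) and carry the overflow into the upper bits of
+ * the software counter.
+ */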
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
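+	/* setting these bits in VTEICS fires each active vector so its
+	 * rings are serviced even when traffic is idle */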
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+		/* always assume link is up if there is no check_link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+ ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps\n" : "1 Gbps\n"));
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else {
+ /* Force detection of hung controller */
+ adapter->detect_tx_hung = true;
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+ ixgbevf_update_stats(adapter);
+
+pf_has_reset:
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+		/* if the adapter is still stopped then the PF isn't up and
+		 * the VF can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+ printk(KERN_ERR "Unable to start - perhaps the PF"
+ " Driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted.
+ * if request_irq will be called in this function map_rings
+ * must be called *before* up_complete
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
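+		/*
+		 * Zero the IP length fields and seed the TCP checksum with
+		 * the pseudo-header sum; the hardware fills in the per-segment
+		 * lengths and completes the checksum for each segment it sends.
+		 */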
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size, count = 0;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+ int i;
+
+ i = tx_ring->next_to_use;
+
+ len = min(skb_headlen(skb), total);
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = frag->page_offset;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
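+	/* txd_cmd (EOP/RS) is applied to the last descriptor of the packet only */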
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+	/* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
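+/* Stop the queue only if the ring cannot hold this packet; the common
+ * case returns without taking the slow path above.
+ */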
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+	/* three things can cause us to need a context descriptor */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = &ixgbevf_open,
+ .ndo_stop = &ixgbevf_close,
+ .ndo_start_xmit = &ixgbevf_xmit_frame,
+ .ndo_get_stats = &ixgbevf_get_stats,
+ .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &ixgbevf_set_mac,
+ .ndo_change_mtu = &ixgbevf_change_mtu,
+ .ndo_tx_timeout = &ixgbevf_tx_timeout,
+ .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+};
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = netdev_priv(dev);
+ dev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mac_operations));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+ /* setup the private structure */
+ err = ixgbevf_sw_init(adapter);
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ printk(KERN_ERR "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+ /* print the MAC address */
+ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+ hw_dbg(hw, "LRO is disabled \n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->watchdog_task);
+
+ flush_scheduled_work();
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 000000000000..b8143501e6fc
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
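+	/* fold in read-to-clear bits seen on earlier reads and remember any
+	 * new ones so they are not lost before being acted upon */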
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set a reset bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+	/* reserve mailbox for VF use: the lock is ours only if VFU reads back set */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read a message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 000000000000..1b0e0bf4c0f5
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the message value or'd with 0xF0000000.
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
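+/* A PF reply echoes the request value or'd with one of the type bits
+ * above; e.g. (IXGBE_VT_MSGTYPE_NACK | IXGBE_VF_SET_MAC_ADDR), i.e.
+ * 0x40000002, indicates a rejected MAC address request. */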
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
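The IXGBE_VT_MSGTYPE_* bits defined above carry the PF's verdict back in word 0 of every reply: the PF echoes the request type with either the ACK or the NACK bit OR'd in. A minimal sketch of that check, using a hypothetical helper rather than anything this patch defines (vf.c below performs the same comparison inline in ixgbevf_reset_hw_vf()):

#include <linux/types.h>

/* Hypothetical helper, not part of the patch: true when the PF has
 * acknowledged the request type carried in msg_type. */
static inline bool ixgbevf_example_msg_acked(u32 reply, u32 msg_type)
{
	return reply == (msg_type | IXGBE_VT_MSGTYPE_ACK);
}

ixgbevf_reset_hw_vf(), for instance, treats anything other than (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) in word 0 as a failure to obtain the permanent MAC address.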
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 000000000000..12f75960aec1
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
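The accessor macros at the bottom of the file wrap readl()/writel() on the mapped BAR, and IXGBE_WRITE_FLUSH() is simply a read of VFSTATUS that posts any outstanding writes. A short sketch of the usual pattern, assuming only the struct ixgbe_hw declared in vf.h (an illustration, not a function this patch adds):

/* Illustration only: mask all VF interrupt causes and clear anything
 * pending, the same sequence ixgbevf_stop_hw_vf() uses in vf.c. */
static void ixgbevf_example_mask_irqs(struct ixgbe_hw *hw, u32 clear_mask)
{
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, clear_mask);	/* mask causes */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);		/* clear pending bits */
	IXGBE_WRITE_FLUSH(hw);				/* post the MMIO writes */
}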
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 000000000000..4b5dec0ec140
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by clearing the adapter-stopped flag, leaving the
+ * transmit and receive units disabled and uninitialized.
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggybacked
+ * on the MAC address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear the interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * each incoming rx multicast address to pick the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is
+ * selected by the MO field of MCSTCTRL; the MO field is set during
+ * initialization to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if NACKed, the address was rejected; fall back to "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ /* Each entry in the list uses one 16-bit word. The HW msg buffer
+ * holds sixteen 32-bit words; minus one for the msg type that
+ * leaves thirty 16-bit words, i.e. 30 hash values if we pack them
+ * tightly. If there are more than 30 MC addresses to add, punt the
+ * extras for now and add code to handle more than 30 later. It
+ * would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ vector_list[i] = vector;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8-bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
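ixgbevf_mta_vector() above reduces each multicast address to a 12-bit index; the VF never writes the MTA itself, it only reports these indices to the PF. A worked example with hypothetical address bytes (not taken from the patch):

/* With mc_filter_type == 0 (bits [47:36]), mc_addr[4] = 0x5E and
 * mc_addr[5] = 0x7F:
 *
 *	vector = (0x5E >> 4) | ((u16)0x7F << 4)
 *	       = 0x005 | 0x7F0
 *	       = 0x7F5			(already within the 0xFFF mask)
 *
 * ixgbevf_update_mc_addr_list_vf() packs up to 30 such vectors after the
 * IXGBE_VF_SET_MULTICAST word and posts the buffer to the PF mailbox. */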
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 000000000000..1f31b052d4b4
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,174 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
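The ixgbe_mc_addr_itr typedef lets update_mc_addr_list() walk whatever multicast-list representation its caller keeps. A minimal sketch of a conforming iterator over a flat array of addresses stored back to back (hypothetical; the real callback lives in the netdev code that consumes this header):

/* Hypothetical iterator: returns the current address and advances the
 * cursor by one Ethernet address; a VF has no VMDq pool to report. */
static u8 *ixgbevf_example_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				  u32 *vmdq)
{
	u8 *addr = *mc_addr_ptr;

	*vmdq = 0;
	*mc_addr_ptr = addr + ETH_ALEN;
	return addr;
}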
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index e9d9d595e1b7..d5932ca3e27d 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
+#include <linux/gfp.h>
#include <asm/hardware/uengine.h>
#include <asm/io.h>
#include "ixp2400_rx.ucode"
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index f47d4d663b19..3e6aaf9e5ce7 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -22,11 +22,11 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -35,6 +35,7 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc3574..b705ad3a53a7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -37,6 +37,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
+#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"
@@ -288,7 +289,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
wmb();
if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
- msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+ netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}
static void
@@ -483,13 +484,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- msg_link(jme, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- msg_link(jme, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down.\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -883,20 +884,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
== RXWBFLAG_TCPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "TCP Checksum error\n");
+ netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- msg_rx_err(jme, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
return false;
}
@@ -946,6 +947,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
jme->jme_vlan_rx(skb, jme->vlgrp,
le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
+ } else {
+ dev_kfree_skb(skb);
}
} else {
jme->jme_rx(skb);
@@ -1186,9 +1189,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- msg_intr(jme, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
while (atomic_read(&jme->link_changing) != 1)
- msg_intr(jme, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1308,7 @@ jme_rx_empty_tasklet(unsigned long arg)
if (unlikely(!netif_carrier_ok(jme->dev)))
return;
- msg_rx_status(jme, "RX Queue Full!\n");
+ netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
@@ -1325,7 +1328,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- msg_tx_done(jme, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
netif_wake_queue(jme->dev);
}
@@ -1835,7 +1838,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- msg_tx_err(jme, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
break;
}
}
@@ -1910,12 +1913,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
}
}
@@ -1923,7 +1926,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
}
}
@@ -1946,7 +1949,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1997,7 +2000,6 @@ jme_set_multi(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
u32 mc_hash[2] = {};
- int i;
spin_lock_bh(&jme->rxmcs_lock);
@@ -2012,10 +2014,7 @@ jme_set_multi(struct net_device *netdev)
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
- ++i, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, netdev) {
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
@@ -2085,12 +2084,45 @@ jme_tx_timeout(struct net_device *netdev)
jme_reset_link(jme);
}
+static inline void jme_pause_rx(struct jme_adapter *jme)
+{
+ atomic_dec(&jme->link_changing);
+
+ jme_set_rx_pcc(jme, PCC_OFF);
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_DISABLE(jme);
+ } else {
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+ }
+}
+
+static inline void jme_resume_rx(struct jme_adapter *jme)
+{
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_ENABLE(jme);
+ } else {
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+ }
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+ jme_set_rx_pcc(jme, PCC_P1);
+
+ atomic_inc(&jme->link_changing);
+}
+
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct jme_adapter *jme = netdev_priv(netdev);
+ jme_pause_rx(jme);
jme->vlgrp = grp;
+ jme_resume_rx(jme);
}
static void
@@ -2473,7 +2505,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2489,7 +2521,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2509,7 +2541,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2526,7 +2558,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2876,14 +2908,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
- "JMC250 Gigabit Ethernet" :
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
- "JMC260 Fast Ethernet" : "Unknown",
- (jme->fpgaver != 0) ? " (FPGA)" : "",
- (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
+ "JMC250 Gigabit Ethernet" :
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
+ "JMC260 Fast Ethernet" : "Unknown",
+ (jme->fpgaver != 0) ? " (FPGA)" : "",
+ (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+ jme->rev, netdev->dev_addr);
return 0;
@@ -2994,7 +3026,7 @@ jme_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
{ }
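The logging conversions in this file swap the driver-private msg_*() wrappers for the generic netif_info()/netif_err() helpers; both are gated on the same netif_msg_<type>() bits in msg_enable, so the filtering behaviour should be unchanged. Roughly equivalent forms (illustration, not code from the patch):

/* Old wrapper, as expanded from the jme_msg() macro that the jme.h hunk
 * below removes: */
if (netif_msg_link(jme))
	printk(KERN_INFO "%s: Link is down.\n", jme->dev->name);

/* New form; the helper performs the same netif_msg_link() test itself: */
netif_info(jme, link, jme->dev, "Link is down.\n");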
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 251abed3817e..07ad3a457185 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -25,7 +25,7 @@
#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
-#define DRV_VERSION "1.0.5"
+#define DRV_VERSION "1.0.6"
#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -45,43 +45,16 @@
printk(KERN_ERR PFX fmt, ## args)
#ifdef TX_DEBUG
-#define tx_dbg(priv, fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#define tx_dbg(priv, fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
#else
-#define tx_dbg(priv, fmt, args...)
+#define tx_dbg(priv, fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
+} while (0)
#endif
-#define jme_msg(msglvl, type, priv, fmt, args...) \
- if (netif_msg_##type(priv)) \
- printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
-
-#define msg_probe(priv, fmt, args...) \
- jme_msg(KERN_INFO, probe, priv, fmt, ## args)
-
-#define msg_link(priv, fmt, args...) \
- jme_msg(KERN_INFO, link, priv, fmt, ## args)
-
-#define msg_intr(priv, fmt, args...) \
- jme_msg(KERN_INFO, intr, priv, fmt, ## args)
-
-#define msg_rx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
-
-#define msg_rx_status(priv, fmt, args...) \
- jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
-
-#define msg_tx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
-
-#define msg_tx_done(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
-
-#define msg_tx_queued(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
-
-#define msg_hw(priv, fmt, args...) \
- jme_msg(KERN_ERR, hw, priv, fmt, ## args)
-
/*
* Extra PCI Configuration space interface
*/
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 25e2af6997e4..300c2249812d 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -490,23 +490,21 @@ static void korina_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
recognise |= ETH_ARC_PRO;
- else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
/* All multicast and broadcast */
recognise |= ETH_ARC_AM;
/* Build the hash table */
- if (dev->mc_count > 4) {
+ if (netdev_mc_count(dev) > 4) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 6d3ac65bc35c..9e9f9b349766 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -407,7 +407,7 @@ static irqreturn_t ks8851_irq(int irq, void *pw)
* @buff: The buffer address
* @len: The length of the data to read
*
- * Issue an RXQ FIFO read command and read the @len ammount of data from
+ * Issue an RXQ FIFO read command and read the @len amount of data from
* the FIFO into the buffer specified by @buff.
*/
static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work)
txb = skb_dequeue(&ks->txq);
last = skb_queue_empty(&ks->txq);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- ks8851_wrpkt(ks, txb, last);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ if (txb != NULL) {
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrpkt(ks, txb, last);
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
- ks8851_done_tx(ks, txb);
+ ks8851_done_tx(ks, txb);
+ }
}
mutex_unlock(&ks->lock);
@@ -965,19 +967,17 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
- } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
- struct dev_mc_list *mcptr = dev->mc_list;
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
+ struct dev_mc_list *mcptr;
u32 crc;
- int i;
/* accept some multicast */
- for (i = dev->mc_count; i > 0; i--) {
+ netdev_for_each_mc_addr(mcptr, dev) {
crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
crc >>= (32 - 6); /* get top six bits */
rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
- mcptr = mcptr->next;
}
rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c146304d8d6c..6354ab3a45a6 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -31,6 +31,7 @@
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#define DRV_NAME "ks8851_mll"
@@ -854,8 +855,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
static irqreturn_t ks_irq(int irq, void *pw)
{
- struct ks_net *ks = pw;
- struct net_device *netdev = ks->netdev;
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
u16 status;
/*this should be the first in IRQ handler */
@@ -1193,10 +1194,11 @@ static void ks_set_rx_mode(struct net_device *netdev)
else
ks_set_promis(ks, false);
- if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
- if (netdev->mc_count <= MAX_MCAST_LST) {
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
+
+ netdev_for_each_mc_addr(ptr, netdev) {
if (!(*ptr->dmi_addr & 1))
continue;
if (i >= MAX_MCAST_LST)
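korina, ks8851 and ks8851_mll are all converted here from hand-rolled walks of dev->mc_list / dev->mc_count to the netdev_mc_count() and netdev_for_each_mc_addr() helpers. The common shape of the new loop (illustrative fragment only, not a new driver function):

struct dev_mc_list *mc;

netdev_for_each_mc_addr(mc, dev) {
	u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);

	/* fold crc into whatever hash-table layout the chip expects */
}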
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
new file mode 100644
index 000000000000..0606a1f359fb
--- /dev/null
+++ b/drivers/net/ksz884x.c
@@ -0,0 +1,7338 @@
+/**
+ * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ *
+ * Copyright (c) 2009-2010 Micrel, Inc.
+ * Tristram Ha <Tristram.Ha@micrel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+
+/* DMA Registers */
+
+#define KS_DMA_TX_CTRL 0x0000
+#define DMA_TX_ENABLE 0x00000001
+#define DMA_TX_CRC_ENABLE 0x00000002
+#define DMA_TX_PAD_ENABLE 0x00000004
+#define DMA_TX_LOOPBACK 0x00000100
+#define DMA_TX_FLOW_ENABLE 0x00000200
+#define DMA_TX_CSUM_IP 0x00010000
+#define DMA_TX_CSUM_TCP 0x00020000
+#define DMA_TX_CSUM_UDP 0x00040000
+#define DMA_TX_BURST_SIZE 0x3F000000
+
+#define KS_DMA_RX_CTRL 0x0004
+#define DMA_RX_ENABLE 0x00000001
+#define KS884X_DMA_RX_MULTICAST 0x00000002
+#define DMA_RX_PROMISCUOUS 0x00000004
+#define DMA_RX_ERROR 0x00000008
+#define DMA_RX_UNICAST 0x00000010
+#define DMA_RX_ALL_MULTICAST 0x00000020
+#define DMA_RX_BROADCAST 0x00000040
+#define DMA_RX_FLOW_ENABLE 0x00000200
+#define DMA_RX_CSUM_IP 0x00010000
+#define DMA_RX_CSUM_TCP 0x00020000
+#define DMA_RX_CSUM_UDP 0x00040000
+#define DMA_RX_BURST_SIZE 0x3F000000
+
+#define DMA_BURST_SHIFT 24
+#define DMA_BURST_DEFAULT 8
+
+#define KS_DMA_TX_START 0x0008
+#define KS_DMA_RX_START 0x000C
+#define DMA_START 0x00000001
+
+#define KS_DMA_TX_ADDR 0x0010
+#define KS_DMA_RX_ADDR 0x0014
+
+#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
+#define DMA_ADDR_LIST_SHIFT 2
+
+/* MTR0 */
+#define KS884X_MULTICAST_0_OFFSET 0x0020
+#define KS884X_MULTICAST_1_OFFSET 0x0021
+#define KS884X_MULTICAST_2_OFFSET 0x0022
+#define KS884x_MULTICAST_3_OFFSET 0x0023
+/* MTR1 */
+#define KS884X_MULTICAST_4_OFFSET 0x0024
+#define KS884X_MULTICAST_5_OFFSET 0x0025
+#define KS884X_MULTICAST_6_OFFSET 0x0026
+#define KS884X_MULTICAST_7_OFFSET 0x0027
+
+/* Interrupt Registers */
+
+/* INTEN */
+#define KS884X_INTERRUPTS_ENABLE 0x0028
+/* INTST */
+#define KS884X_INTERRUPTS_STATUS 0x002C
+
+#define KS884X_INT_RX_STOPPED 0x02000000
+#define KS884X_INT_TX_STOPPED 0x04000000
+#define KS884X_INT_RX_OVERRUN 0x08000000
+#define KS884X_INT_TX_EMPTY 0x10000000
+#define KS884X_INT_RX 0x20000000
+#define KS884X_INT_TX 0x40000000
+#define KS884X_INT_PHY 0x80000000
+
+#define KS884X_INT_RX_MASK \
+ (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
+#define KS884X_INT_TX_MASK \
+ (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
+#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
+
+/* MAC Additional Station Address */
+
+/* MAAL0 */
+#define KS_ADD_ADDR_0_LO 0x0080
+/* MAAH0 */
+#define KS_ADD_ADDR_0_HI 0x0084
+/* MAAL1 */
+#define KS_ADD_ADDR_1_LO 0x0088
+/* MAAH1 */
+#define KS_ADD_ADDR_1_HI 0x008C
+/* MAAL2 */
+#define KS_ADD_ADDR_2_LO 0x0090
+/* MAAH2 */
+#define KS_ADD_ADDR_2_HI 0x0094
+/* MAAL3 */
+#define KS_ADD_ADDR_3_LO 0x0098
+/* MAAH3 */
+#define KS_ADD_ADDR_3_HI 0x009C
+/* MAAL4 */
+#define KS_ADD_ADDR_4_LO 0x00A0
+/* MAAH4 */
+#define KS_ADD_ADDR_4_HI 0x00A4
+/* MAAL5 */
+#define KS_ADD_ADDR_5_LO 0x00A8
+/* MAAH5 */
+#define KS_ADD_ADDR_5_HI 0x00AC
+/* MAAL6 */
+#define KS_ADD_ADDR_6_LO 0x00B0
+/* MAAH6 */
+#define KS_ADD_ADDR_6_HI 0x00B4
+/* MAAL7 */
+#define KS_ADD_ADDR_7_LO 0x00B8
+/* MAAH7 */
+#define KS_ADD_ADDR_7_HI 0x00BC
+/* MAAL8 */
+#define KS_ADD_ADDR_8_LO 0x00C0
+/* MAAH8 */
+#define KS_ADD_ADDR_8_HI 0x00C4
+/* MAAL9 */
+#define KS_ADD_ADDR_9_LO 0x00C8
+/* MAAH9 */
+#define KS_ADD_ADDR_9_HI 0x00CC
+/* MAAL10 */
+#define KS_ADD_ADDR_A_LO 0x00D0
+/* MAAH10 */
+#define KS_ADD_ADDR_A_HI 0x00D4
+/* MAAL11 */
+#define KS_ADD_ADDR_B_LO 0x00D8
+/* MAAH11 */
+#define KS_ADD_ADDR_B_HI 0x00DC
+/* MAAL12 */
+#define KS_ADD_ADDR_C_LO 0x00E0
+/* MAAH12 */
+#define KS_ADD_ADDR_C_HI 0x00E4
+/* MAAL13 */
+#define KS_ADD_ADDR_D_LO 0x00E8
+/* MAAH13 */
+#define KS_ADD_ADDR_D_HI 0x00EC
+/* MAAL14 */
+#define KS_ADD_ADDR_E_LO 0x00F0
+/* MAAH14 */
+#define KS_ADD_ADDR_E_HI 0x00F4
+/* MAAL15 */
+#define KS_ADD_ADDR_F_LO 0x00F8
+/* MAAH15 */
+#define KS_ADD_ADDR_F_HI 0x00FC
+
+#define ADD_ADDR_HI_MASK 0x0000FFFF
+#define ADD_ADDR_ENABLE 0x80000000
+#define ADD_ADDR_INCR 8
+
+/* Miscellaneous Registers */
+
+/* MARL */
+#define KS884X_ADDR_0_OFFSET 0x0200
+#define KS884X_ADDR_1_OFFSET 0x0201
+/* MARM */
+#define KS884X_ADDR_2_OFFSET 0x0202
+#define KS884X_ADDR_3_OFFSET 0x0203
+/* MARH */
+#define KS884X_ADDR_4_OFFSET 0x0204
+#define KS884X_ADDR_5_OFFSET 0x0205
+
+/* OBCR */
+#define KS884X_BUS_CTRL_OFFSET 0x0210
+
+#define BUS_SPEED_125_MHZ 0x0000
+#define BUS_SPEED_62_5_MHZ 0x0001
+#define BUS_SPEED_41_66_MHZ 0x0002
+#define BUS_SPEED_25_MHZ 0x0003
+
+/* EEPCR */
+#define KS884X_EEPROM_CTRL_OFFSET 0x0212
+
+#define EEPROM_CHIP_SELECT 0x0001
+#define EEPROM_SERIAL_CLOCK 0x0002
+#define EEPROM_DATA_OUT 0x0004
+#define EEPROM_DATA_IN 0x0008
+#define EEPROM_ACCESS_ENABLE 0x0010
+
+/* MBIR */
+#define KS884X_MEM_INFO_OFFSET 0x0214
+
+#define RX_MEM_TEST_FAILED 0x0008
+#define RX_MEM_TEST_FINISHED 0x0010
+#define TX_MEM_TEST_FAILED 0x0800
+#define TX_MEM_TEST_FINISHED 0x1000
+
+/* GCR */
+#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
+#define GLOBAL_SOFTWARE_RESET 0x0001
+
+#define KS8841_POWER_MANAGE_OFFSET 0x0218
+
+/* WFCR */
+#define KS8841_WOL_CTRL_OFFSET 0x021A
+#define KS8841_WOL_MAGIC_ENABLE 0x0080
+#define KS8841_WOL_FRAME3_ENABLE 0x0008
+#define KS8841_WOL_FRAME2_ENABLE 0x0004
+#define KS8841_WOL_FRAME1_ENABLE 0x0002
+#define KS8841_WOL_FRAME0_ENABLE 0x0001
+
+/* WF0 */
+#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
+#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
+#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
+
+/* IACR */
+#define KS884X_IACR_P 0x04A0
+#define KS884X_IACR_OFFSET KS884X_IACR_P
+
+/* IADR1 */
+#define KS884X_IADR1_P 0x04A2
+#define KS884X_IADR2_P 0x04A4
+#define KS884X_IADR3_P 0x04A6
+#define KS884X_IADR4_P 0x04A8
+#define KS884X_IADR5_P 0x04AA
+
+#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
+#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
+
+#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
+#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
+#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
+#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
+#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
+#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
+#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
+#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
+#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
+
+/* P1MBCR */
+#define KS884X_P1MBCR_P 0x04D0
+#define KS884X_P1MBSR_P 0x04D2
+#define KS884X_PHY1ILR_P 0x04D4
+#define KS884X_PHY1IHR_P 0x04D6
+#define KS884X_P1ANAR_P 0x04D8
+#define KS884X_P1ANLPR_P 0x04DA
+
+/* P2MBCR */
+#define KS884X_P2MBCR_P 0x04E0
+#define KS884X_P2MBSR_P 0x04E2
+#define KS884X_PHY2ILR_P 0x04E4
+#define KS884X_PHY2IHR_P 0x04E6
+#define KS884X_P2ANAR_P 0x04E8
+#define KS884X_P2ANLPR_P 0x04EA
+
+#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
+#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
+
+#define KS884X_PHY_CTRL_OFFSET 0x00
+
+/* Mode Control Register */
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET 0x8000
+#define PHY_LOOPBACK 0x4000
+#define PHY_SPEED_100MBIT 0x2000
+#define PHY_AUTO_NEG_ENABLE 0x1000
+#define PHY_POWER_DOWN 0x0800
+#define PHY_MII_DISABLE 0x0400
+#define PHY_AUTO_NEG_RESTART 0x0200
+#define PHY_FULL_DUPLEX 0x0100
+#define PHY_COLLISION_TEST 0x0080
+#define PHY_HP_MDIX 0x0020
+#define PHY_FORCE_MDIX 0x0010
+#define PHY_AUTO_MDIX_DISABLE 0x0008
+#define PHY_REMOTE_FAULT_DISABLE 0x0004
+#define PHY_TRANSMIT_DISABLE 0x0002
+#define PHY_LED_DISABLE 0x0001
+
+#define KS884X_PHY_STATUS_OFFSET 0x02
+
+/* Mode Status Register */
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE 0x8000
+#define PHY_100BTX_FD_CAPABLE 0x4000
+#define PHY_100BTX_CAPABLE 0x2000
+#define PHY_10BT_FD_CAPABLE 0x1000
+#define PHY_10BT_CAPABLE 0x0800
+#define PHY_MII_SUPPRESS_CAPABLE 0x0040
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
+#define PHY_REMOTE_FAULT 0x0010
+#define PHY_AUTO_NEG_CAPABLE 0x0008
+#define PHY_LINK_STATUS 0x0004
+#define PHY_JABBER_DETECT 0x0002
+#define PHY_EXTENDED_CAPABILITY 0x0001
+
+#define KS884X_PHY_ID_1_OFFSET 0x04
+#define KS884X_PHY_ID_2_OFFSET 0x06
+
+/* PHY Identifier Registers */
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
+
+/* Auto-Negotiation Advertisement Register */
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
+#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
+/* Not supported. */
+#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
+#define PHY_AUTO_NEG_100BT4 0x0200
+#define PHY_AUTO_NEG_100BTX_FD 0x0100
+#define PHY_AUTO_NEG_100BTX 0x0080
+#define PHY_AUTO_NEG_10BT_FD 0x0040
+#define PHY_AUTO_NEG_10BT 0x0020
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
+
+#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
+
+/* Auto-Negotiation Link Partner Ability Register */
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE 0x8000
+#define PHY_REMOTE_ACKNOWLEDGE 0x4000
+#define PHY_REMOTE_REMOTE_FAULT 0x2000
+#define PHY_REMOTE_SYM_PAUSE 0x0400
+#define PHY_REMOTE_100BTX_FD 0x0100
+#define PHY_REMOTE_100BTX 0x0080
+#define PHY_REMOTE_10BT_FD 0x0040
+#define PHY_REMOTE_10BT 0x0020
+
+/* P1VCT */
+#define KS884X_P1VCT_P 0x04F0
+#define KS884X_P1PHYCTRL_P 0x04F2
+
+/* P2VCT */
+#define KS884X_P2VCT_P 0x04F4
+#define KS884X_P2PHYCTRL_P 0x04F6
+
+#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
+#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
+
+#define KS884X_PHY_LINK_MD_OFFSET 0x00
+
+#define PHY_START_CABLE_DIAG 0x8000
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT 0x1000
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
+
+#define PHY_STAT_REVERSED_POLARITY 0x0020
+#define PHY_STAT_MDIX 0x0010
+#define PHY_FORCE_LINK 0x0008
+#define PHY_POWER_SAVING_DISABLE 0x0004
+#define PHY_REMOTE_LOOPBACK 0x0002
+
+/* SIDER */
+#define KS884X_SIDER_P 0x0400
+#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
+#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
+
+#define REG_FAMILY_ID 0x88
+
+#define REG_CHIP_ID_41 0x8810
+#define REG_CHIP_ID_42 0x8800
+
+#define KS884X_CHIP_ID_MASK_41 0xFF10
+#define KS884X_CHIP_ID_MASK 0xFFF0
+#define KS884X_CHIP_ID_SHIFT 4
+#define KS884X_REVISION_MASK 0x000E
+#define KS884X_REVISION_SHIFT 1
+#define KS8842_START 0x0001
+
+#define CHIP_IP_41_M 0x8810
+#define CHIP_IP_42_M 0x8800
+#define CHIP_IP_61_M 0x8890
+#define CHIP_IP_62_M 0x8880
+
+#define CHIP_IP_41_P 0x8850
+#define CHIP_IP_42_P 0x8840
+#define CHIP_IP_61_P 0x88D0
+#define CHIP_IP_62_P 0x88C0
+
+/* SGCR1 */
+#define KS8842_SGCR1_P 0x0402
+#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
+
+#define SWITCH_PASS_ALL 0x8000
+#define SWITCH_TX_FLOW_CTRL 0x2000
+#define SWITCH_RX_FLOW_CTRL 0x1000
+#define SWITCH_CHECK_LENGTH 0x0800
+#define SWITCH_AGING_ENABLE 0x0400
+#define SWITCH_FAST_AGING 0x0200
+#define SWITCH_AGGR_BACKOFF 0x0100
+#define SWITCH_PASS_PAUSE 0x0008
+#define SWITCH_LINK_AUTO_AGING 0x0001
+
+/* SGCR2 */
+#define KS8842_SGCR2_P 0x0404
+#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
+
+#define SWITCH_VLAN_ENABLE 0x8000
+#define SWITCH_IGMP_SNOOP 0x4000
+#define IPV6_MLD_SNOOP_ENABLE 0x2000
+#define IPV6_MLD_SNOOP_OPTION 0x1000
+#define PRIORITY_SCHEME_SELECT 0x0800
+#define SWITCH_MIRROR_RX_TX 0x0100
+#define UNICAST_VLAN_BOUNDARY 0x0080
+#define MULTICAST_STORM_DISABLE 0x0040
+#define SWITCH_BACK_PRESSURE 0x0020
+#define FAIR_FLOW_CTRL 0x0010
+#define NO_EXC_COLLISION_DROP 0x0008
+#define SWITCH_HUGE_PACKET 0x0004
+#define SWITCH_LEGAL_PACKET 0x0002
+#define SWITCH_BUF_RESERVE 0x0001
+
+/* SGCR3 */
+#define KS8842_SGCR3_P 0x0406
+#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
+
+#define BROADCAST_STORM_RATE_LO 0xFF00
+#define SWITCH_REPEATER 0x0080
+#define SWITCH_HALF_DUPLEX 0x0040
+#define SWITCH_FLOW_CTRL 0x0020
+#define SWITCH_10_MBIT 0x0010
+#define SWITCH_REPLACE_NULL_VID 0x0008
+#define BROADCAST_STORM_RATE_HI 0x0007
+
+#define BROADCAST_STORM_RATE 0x07FF
+
+/* SGCR4 */
+#define KS8842_SGCR4_P 0x0408
+
+/* SGCR5 */
+#define KS8842_SGCR5_P 0x040A
+#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
+
+#define LED_MODE 0x8200
+#define LED_SPEED_DUPLEX_ACT 0x0000
+#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
+#define LED_DUPLEX_10_100 0x0200
+
+/* SGCR6 */
+#define KS8842_SGCR6_P 0x0410
+#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
+
+#define KS8842_PRIORITY_MASK 3
+#define KS8842_PRIORITY_SHIFT 2
+
+/* SGCR7 */
+#define KS8842_SGCR7_P 0x0412
+#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
+
+#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
+#define SWITCH_UNK_DEF_PORT_3 0x0004
+#define SWITCH_UNK_DEF_PORT_2 0x0002
+#define SWITCH_UNK_DEF_PORT_1 0x0001
+
+/* MACAR1 */
+#define KS8842_MACAR1_P 0x0470
+#define KS8842_MACAR2_P 0x0472
+#define KS8842_MACAR3_P 0x0474
+#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
+#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
+#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
+#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
+#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
+#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
+
+/* TOSR1 */
+#define KS8842_TOSR1_P 0x0480
+#define KS8842_TOSR2_P 0x0482
+#define KS8842_TOSR3_P 0x0484
+#define KS8842_TOSR4_P 0x0486
+#define KS8842_TOSR5_P 0x0488
+#define KS8842_TOSR6_P 0x048A
+#define KS8842_TOSR7_P 0x0490
+#define KS8842_TOSR8_P 0x0492
+#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
+#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
+#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
+#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
+#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
+#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
+
+#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
+#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
+
+/* P1CR1 */
+#define KS8842_P1CR1_P 0x0500
+#define KS8842_P1CR2_P 0x0502
+#define KS8842_P1VIDR_P 0x0504
+#define KS8842_P1CR3_P 0x0506
+#define KS8842_P1IRCR_P 0x0508
+#define KS8842_P1ERCR_P 0x050A
+#define KS884X_P1SCSLMD_P 0x0510
+#define KS884X_P1CR4_P 0x0512
+#define KS884X_P1SR_P 0x0514
+
+/* P2CR1 */
+#define KS8842_P2CR1_P 0x0520
+#define KS8842_P2CR2_P 0x0522
+#define KS8842_P2VIDR_P 0x0524
+#define KS8842_P2CR3_P 0x0526
+#define KS8842_P2IRCR_P 0x0528
+#define KS8842_P2ERCR_P 0x052A
+#define KS884X_P2SCSLMD_P 0x0530
+#define KS884X_P2CR4_P 0x0532
+#define KS884X_P2SR_P 0x0534
+
+/* P3CR1 */
+#define KS8842_P3CR1_P 0x0540
+#define KS8842_P3CR2_P 0x0542
+#define KS8842_P3VIDR_P 0x0544
+#define KS8842_P3CR3_P 0x0546
+#define KS8842_P3IRCR_P 0x0548
+#define KS8842_P3ERCR_P 0x054A
+
+#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
+#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
+#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
+
+#define PORT_CTRL_ADDR(port, addr) \
+ (addr = KS8842_PORT_1_CTRL_1 + (port) * \
+ (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
+
+#define KS8842_PORT_CTRL_1_OFFSET 0x00
+
+#define PORT_BROADCAST_STORM 0x0080
+#define PORT_DIFFSERV_ENABLE 0x0040
+#define PORT_802_1P_ENABLE 0x0020
+#define PORT_BASED_PRIORITY_MASK 0x0018
+#define PORT_BASED_PRIORITY_BASE 0x0003
+#define PORT_BASED_PRIORITY_SHIFT 3
+#define PORT_BASED_PRIORITY_0 0x0000
+#define PORT_BASED_PRIORITY_1 0x0008
+#define PORT_BASED_PRIORITY_2 0x0010
+#define PORT_BASED_PRIORITY_3 0x0018
+#define PORT_INSERT_TAG 0x0004
+#define PORT_REMOVE_TAG 0x0002
+#define PORT_PRIO_QUEUE_ENABLE 0x0001
+
+#define KS8842_PORT_CTRL_2_OFFSET 0x02
+
+#define PORT_INGRESS_VLAN_FILTER 0x4000
+#define PORT_DISCARD_NON_VID 0x2000
+#define PORT_FORCE_FLOW_CTRL 0x1000
+#define PORT_BACK_PRESSURE 0x0800
+#define PORT_TX_ENABLE 0x0400
+#define PORT_RX_ENABLE 0x0200
+#define PORT_LEARN_DISABLE 0x0100
+#define PORT_MIRROR_SNIFFER 0x0080
+#define PORT_MIRROR_RX 0x0040
+#define PORT_MIRROR_TX 0x0020
+#define PORT_USER_PRIORITY_CEILING 0x0008
+#define PORT_VLAN_MEMBERSHIP 0x0007
+
+#define KS8842_PORT_CTRL_VID_OFFSET 0x04
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define KS8842_PORT_CTRL_3_OFFSET 0x06
+
+#define PORT_INGRESS_LIMIT_MODE 0x000C
+#define PORT_INGRESS_ALL 0x0000
+#define PORT_INGRESS_UNICAST 0x0004
+#define PORT_INGRESS_MULTICAST 0x0008
+#define PORT_INGRESS_BROADCAST 0x000C
+#define PORT_COUNT_IFG 0x0002
+#define PORT_COUNT_PREAMBLE 0x0001
+
+#define KS8842_PORT_IN_RATE_OFFSET 0x08
+#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
+
+#define PORT_PRIORITY_RATE 0x0F
+#define PORT_PRIORITY_RATE_SHIFT 4
+
+#define KS884X_PORT_LINK_MD 0x10
+
+#define PORT_CABLE_10M_SHORT 0x8000
+#define PORT_CABLE_DIAG_RESULT 0x6000
+#define PORT_CABLE_STAT_NORMAL 0x0000
+#define PORT_CABLE_STAT_OPEN 0x2000
+#define PORT_CABLE_STAT_SHORT 0x4000
+#define PORT_CABLE_STAT_FAILED 0x6000
+#define PORT_START_CABLE_DIAG 0x1000
+#define PORT_FORCE_LINK 0x0800
+#define PORT_POWER_SAVING_DISABLE 0x0400
+#define PORT_PHY_REMOTE_LOOPBACK 0x0200
+#define PORT_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PORT_CTRL_4_OFFSET 0x12
+
+#define PORT_LED_OFF 0x8000
+#define PORT_TX_DISABLE 0x4000
+#define PORT_AUTO_NEG_RESTART 0x2000
+#define PORT_REMOTE_FAULT_DISABLE 0x1000
+#define PORT_POWER_DOWN 0x0800
+#define PORT_AUTO_MDIX_DISABLE 0x0400
+#define PORT_FORCE_MDIX 0x0200
+#define PORT_LOOPBACK 0x0100
+#define PORT_AUTO_NEG_ENABLE 0x0080
+#define PORT_FORCE_100_MBIT 0x0040
+#define PORT_FORCE_FULL_DUPLEX 0x0020
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
+#define PORT_AUTO_NEG_100BTX_FD 0x0008
+#define PORT_AUTO_NEG_100BTX 0x0004
+#define PORT_AUTO_NEG_10BT_FD 0x0002
+#define PORT_AUTO_NEG_10BT 0x0001
+
+#define KS884X_PORT_STATUS_OFFSET 0x14
+
+#define PORT_HP_MDIX 0x8000
+#define PORT_REVERSED_POLARITY 0x2000
+#define PORT_RX_FLOW_CTRL 0x0800
+#define PORT_TX_FLOW_CTRL 0x1000
+#define PORT_STATUS_SPEED_100MBIT 0x0400
+#define PORT_STATUS_FULL_DUPLEX 0x0200
+#define PORT_REMOTE_FAULT 0x0100
+#define PORT_MDIX_STATUS 0x0080
+#define PORT_AUTO_NEG_COMPLETE 0x0040
+#define PORT_STATUS_LINK_GOOD 0x0020
+#define PORT_REMOTE_SYM_PAUSE 0x0010
+#define PORT_REMOTE_100BTX_FD 0x0008
+#define PORT_REMOTE_100BTX 0x0004
+#define PORT_REMOTE_10BT_FD 0x0002
+#define PORT_REMOTE_10BT 0x0001
+
+/*
+#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
+#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
+#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
+#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
+#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
+*/
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
+#define STATIC_MAC_TABLE_VALID 0x00080000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
+#define STATIC_MAC_TABLE_USE_FID 0x00200000
+#define STATIC_MAC_TABLE_FID 0x03C00000
+
+#define STATIC_MAC_FWD_PORTS_SHIFT 16
+#define STATIC_MAC_FID_SHIFT 22
+
+/*
+#define VLAN_TABLE_VID 00-00000000-00000FFF
+#define VLAN_TABLE_FID 00-00000000-0000F000
+#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
+#define VLAN_TABLE_VALID 00-00000000-00080000
+*/
+
+#define VLAN_TABLE_VID 0x00000FFF
+#define VLAN_TABLE_FID 0x0000F000
+#define VLAN_TABLE_MEMBERSHIP 0x00070000
+#define VLAN_TABLE_VALID 0x00080000
+
+#define VLAN_TABLE_FID_SHIFT 12
+#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
+
+/*
+#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
+#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
+#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
+*/
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x000F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
+#define DYNAMIC_MAC_TABLE_RESERVED 0x78
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_FID_SHIFT 16
+#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
+#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
+#define DYNAMIC_MAC_ENTRIES_SHIFT 24
+#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
+
+/*
+#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+#define MIB_COUNTER_VALID 00-00000000-40000000
+#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
+*/
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+#define MIB_COUNTER_VALID 0x40000000
+#define MIB_COUNTER_OVERFLOW 0x80000000
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define KS_MIB_PACKET_DROPPED_TX_0 0x100
+#define KS_MIB_PACKET_DROPPED_TX_1 0x101
+#define KS_MIB_PACKET_DROPPED_TX 0x102
+#define KS_MIB_PACKET_DROPPED_RX_0 0x103
+#define KS_MIB_PACKET_DROPPED_RX_1 0x104
+#define KS_MIB_PACKET_DROPPED_RX 0x105
+
+/* Change default LED mode. */
+#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
+
+#define MAC_ADDR_LEN 6
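+/* MAC_ADDR_ORDER maps byte index i to its mirrored position (5 - i). */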
+#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+
+#define MAX_ETHERNET_BODY_SIZE 1500
+#define ETHERNET_HEADER_SIZE 14
+
+#define MAX_ETHERNET_PACKET_SIZE \
+ (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
+
+#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
+#define MAX_RX_BUF_SIZE (1912 + 4)
+
+#define ADDITIONAL_ENTRIES 16
+#define MAX_MULTICAST_LIST 32
+
+#define HW_MULTICAST_SIZE 8
+
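+/* Hardware ports are numbered from 1; convert to a 0-based device port index. */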
+#define HW_TO_DEV_PORT(port) (port - 1)
+
+enum {
+ media_connected,
+ media_disconnected
+};
+
+enum {
+ OID_COUNTER_UNKOWN,
+
+ OID_COUNTER_FIRST,
+
+ /* total transmit errors */
+ OID_COUNTER_XMIT_ERROR,
+
+ /* total receive errors */
+ OID_COUNTER_RCV_ERROR,
+
+ OID_COUNTER_LAST
+};
+
+/*
+ * Hardware descriptor definitions
+ */
+
+#define DESC_ALIGNMENT 16
+#define BUFFER_ALIGNMENT 8
+
+#define NUM_OF_RX_DESC 64
+#define NUM_OF_TX_DESC 64
+
+#define KS_DESC_RX_FRAME_LEN 0x000007FF
+#define KS_DESC_RX_FRAME_TYPE 0x00008000
+#define KS_DESC_RX_ERROR_CRC 0x00010000
+#define KS_DESC_RX_ERROR_RUNT 0x00020000
+#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
+#define KS_DESC_RX_ERROR_PHY 0x00080000
+#define KS884X_DESC_RX_PORT_MASK 0x00300000
+#define KS_DESC_RX_MULTICAST 0x01000000
+#define KS_DESC_RX_ERROR 0x02000000
+#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
+#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
+#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
+#define KS_DESC_RX_LAST 0x20000000
+#define KS_DESC_RX_FIRST 0x40000000
+#define KS_DESC_RX_ERROR_COND \
+ (KS_DESC_RX_ERROR_CRC | \
+ KS_DESC_RX_ERROR_RUNT | \
+ KS_DESC_RX_ERROR_PHY | \
+ KS_DESC_RX_ERROR_TOO_LONG)
+
+#define KS_DESC_HW_OWNED 0x80000000
+
+#define KS_DESC_BUF_SIZE 0x000007FF
+#define KS884X_DESC_TX_PORT_MASK 0x00300000
+#define KS_DESC_END_OF_RING 0x02000000
+#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
+#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
+#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
+#define KS_DESC_TX_LAST 0x20000000
+#define KS_DESC_TX_FIRST 0x40000000
+#define KS_DESC_TX_INTERRUPT 0x80000000
+
+#define KS_DESC_PORT_SHIFT 20
+
+#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
+
+#define KS_DESC_TX_MASK \
+ (KS_DESC_TX_INTERRUPT | \
+ KS_DESC_TX_FIRST | \
+ KS_DESC_TX_LAST | \
+ KS_DESC_TX_CSUM_GEN_IP | \
+ KS_DESC_TX_CSUM_GEN_TCP | \
+ KS_DESC_TX_CSUM_GEN_UDP | \
+ KS_DESC_BUF_SIZE)
+
+struct ksz_desc_rx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 first_desc:1;
+ u32 last_desc:1;
+ u32 csum_err_ip:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_udp:1;
+ u32 error:1;
+ u32 multicast:1;
+ u32 src_port:4;
+ u32 err_phy:1;
+ u32 err_too_long:1;
+ u32 err_runt:1;
+ u32 err_crc:1;
+ u32 frame_type:1;
+ u32 reserved1:4;
+ u32 frame_len:11;
+#else
+ u32 frame_len:11;
+ u32 reserved1:4;
+ u32 frame_type:1;
+ u32 err_crc:1;
+ u32 err_runt:1;
+ u32 err_too_long:1;
+ u32 err_phy:1;
+ u32 src_port:4;
+ u32 multicast:1;
+ u32 error:1;
+ u32 csum_err_udp:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_ip:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_tx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 reserved1:31;
+#else
+ u32 reserved1:31;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_rx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved4:6;
+ u32 end_of_ring:1;
+ u32 reserved3:14;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:14;
+ u32 end_of_ring:1;
+ u32 reserved4:6;
+#endif
+};
+
+struct ksz_desc_tx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 intr:1;
+ u32 first_seg:1;
+ u32 last_seg:1;
+ u32 csum_gen_ip:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_udp:1;
+ u32 end_of_ring:1;
+ u32 reserved4:1;
+ u32 dest_port:4;
+ u32 reserved3:9;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:9;
+ u32 dest_port:4;
+ u32 reserved4:1;
+ u32 end_of_ring:1;
+ u32 csum_gen_udp:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_ip:1;
+ u32 last_seg:1;
+ u32 first_seg:1;
+ u32 intr:1;
+#endif
+};
+
+union desc_stat {
+ struct ksz_desc_rx_stat rx;
+ struct ksz_desc_tx_stat tx;
+ u32 data;
+};
+
+union desc_buf {
+ struct ksz_desc_rx_buf rx;
+ struct ksz_desc_tx_buf tx;
+ u32 data;
+};
+
+/**
+ * struct ksz_hw_desc - Hardware descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @addr: Physical address of memory buffer.
+ * @next: Pointer to next hardware descriptor.
+ */
+struct ksz_hw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 addr;
+ u32 next;
+};
+
+/**
+ * struct ksz_sw_desc - Software descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @buf_size: Current buffer size value in the hardware descriptor.
+ */
+struct ksz_sw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 buf_size;
+};
+
+/**
+ * struct ksz_dma_buf - OS dependent DMA buffer data structure
+ * @skb: Associated socket buffer.
+ * @dma: Associated physical DMA address.
+ * @len: Actual length used.
+ */
+struct ksz_dma_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ int len;
+};
+
+/**
+ * struct ksz_desc - Descriptor structure
+ * @phw: Hardware descriptor pointer to uncached physical memory.
+ * @sw: Cached memory to hold hardware descriptor values for
+ * manipulation.
+ * @dma_buf: Operating system dependent data structure to hold physical
+ * memory buffer allocation information.
+ */
+struct ksz_desc {
+ struct ksz_hw_desc *phw;
+ struct ksz_sw_desc sw;
+ struct ksz_dma_buf dma_buf;
+};
+
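+/* Convenience accessor for the OS dependent DMA buffer inside a descriptor. */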
+#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
+
+/**
+ * struct ksz_desc_info - Descriptor information data structure
+ * @ring: First descriptor in the ring.
+ * @cur: Current descriptor being manipulated.
+ * @ring_virt: First hardware descriptor in the ring.
+ * @ring_phys: The physical address of the first descriptor of the ring.
+ * @size: Size of hardware descriptor.
+ * @alloc: Number of descriptors allocated.
+ * @avail: Number of descriptors available for use.
+ * @last: Index for last descriptor released to hardware.
+ * @next: Index for next descriptor available for use.
+ * @mask: Mask for index wrapping.
+ */
+struct ksz_desc_info {
+ struct ksz_desc *ring;
+ struct ksz_desc *cur;
+ struct ksz_hw_desc *ring_virt;
+ u32 ring_phys;
+ int size;
+ int alloc;
+ int avail;
+ int last;
+ int next;
+ int mask;
+};
+
+/*
+ * KSZ8842 switch definitions
+ */
+
+enum {
+ TABLE_STATIC_MAC = 0,
+ TABLE_VLAN,
+ TABLE_DYNAMIC_MAC,
+ TABLE_MIB
+};
+
+#define LEARNED_MAC_TABLE_ENTRIES 1024
+#define STATIC_MAC_TABLE_ENTRIES 8
+
+/**
+ * struct ksz_mac_table - Static MAC table data structure
+ * @mac_addr: MAC address to filter.
+ * @vid: VID value.
+ * @fid: FID value.
+ * @ports: Port membership.
+ * @override: Override setting.
+ * @use_fid: FID use setting.
+ * @valid: Valid setting indicating the entry is being used.
+ */
+struct ksz_mac_table {
+ u8 mac_addr[MAC_ADDR_LEN];
+ u16 vid;
+ u8 fid;
+ u8 ports;
+ u8 override:1;
+ u8 use_fid:1;
+ u8 valid:1;
+};
+
+#define VLAN_TABLE_ENTRIES 16
+
+/**
+ * struct ksz_vlan_table - VLAN table data structure
+ * @vid: VID value.
+ * @fid: FID value.
+ * @member: Port membership.
+ */
+struct ksz_vlan_table {
+ u16 vid;
+ u8 fid;
+ u8 member;
+};
+
+#define DIFFSERV_ENTRIES 64
+#define PRIO_802_1P_ENTRIES 8
+#define PRIO_QUEUES 4
+
+#define SWITCH_PORT_NUM 2
+#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
+#define HOST_MASK (1 << SWITCH_PORT_NUM)
+#define PORT_MASK 7
+
+#define MAIN_PORT 0
+#define OTHER_PORT 1
+#define HOST_PORT SWITCH_PORT_NUM
+
+#define PORT_COUNTER_NUM 0x20
+#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
+
+#define MIB_COUNTER_RX_LO_PRIORITY 0x00
+#define MIB_COUNTER_RX_HI_PRIORITY 0x01
+#define MIB_COUNTER_RX_UNDERSIZE 0x02
+#define MIB_COUNTER_RX_FRAGMENT 0x03
+#define MIB_COUNTER_RX_OVERSIZE 0x04
+#define MIB_COUNTER_RX_JABBER 0x05
+#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
+#define MIB_COUNTER_RX_CRC_ERR 0x07
+#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
+#define MIB_COUNTER_RX_CTRL_8808 0x09
+#define MIB_COUNTER_RX_PAUSE 0x0A
+#define MIB_COUNTER_RX_BROADCAST 0x0B
+#define MIB_COUNTER_RX_MULTICAST 0x0C
+#define MIB_COUNTER_RX_UNICAST 0x0D
+#define MIB_COUNTER_RX_OCTET_64 0x0E
+#define MIB_COUNTER_RX_OCTET_65_127 0x0F
+#define MIB_COUNTER_RX_OCTET_128_255 0x10
+#define MIB_COUNTER_RX_OCTET_256_511 0x11
+#define MIB_COUNTER_RX_OCTET_512_1023 0x12
+#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
+#define MIB_COUNTER_TX_LO_PRIORITY 0x14
+#define MIB_COUNTER_TX_HI_PRIORITY 0x15
+#define MIB_COUNTER_TX_LATE_COLLISION 0x16
+#define MIB_COUNTER_TX_PAUSE 0x17
+#define MIB_COUNTER_TX_BROADCAST 0x18
+#define MIB_COUNTER_TX_MULTICAST 0x19
+#define MIB_COUNTER_TX_UNICAST 0x1A
+#define MIB_COUNTER_TX_DEFERRED 0x1B
+#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
+#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
+#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
+#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
+
+#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
+#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
+
+/**
+ * struct ksz_port_mib - Port MIB data structure
+ * @cnt_ptr: Current pointer to MIB counter index.
+ * @link_down: Indication the link has just gone down.
+ * @state: Connection status of the port.
+ * @mib_start: The starting counter index. Some ports do not start at 0.
+ * @counter: 64-bit MIB counter value.
+ * @dropped: Temporary buffer to remember last read packet dropped values.
+ *
+ * MIB counters need to be read periodically so that they do not overflow and
+ * report incorrect values. A balance is needed between reading them often
+ * enough and not wasting too much CPU time.
+ *
+ * It is pointless to read MIB counters when the port is disconnected. The
+ * @state provides the connection status so that MIB counters are read only
+ * when the port is connected. The @link_down flag indicates the port has just
+ * been disconnected, so all MIB counters are read one last time to update the
+ * information.
+ */
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u8 link_down;
+ u8 state;
+ u8 mib_start;
+
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+ u32 dropped[2];
+};
+
+/**
+ * struct ksz_port_cfg - Port configuration data structure
+ * @vid: VID value.
+ * @member: Port membership.
+ * @port_prio: Port priority.
+ * @rx_rate: Receive priority rate.
+ * @tx_rate: Transmit priority rate.
+ * @stp_state: Current Spanning Tree Protocol state.
+ */
+struct ksz_port_cfg {
+ u16 vid;
+ u8 member;
+ u8 port_prio;
+ u32 rx_rate[PRIO_QUEUES];
+ u32 tx_rate[PRIO_QUEUES];
+ int stp_state;
+};
+
+/**
+ * struct ksz_switch - KSZ8842 switch data structure
+ * @mac_table: MAC table entries information.
+ * @vlan_table: VLAN table entries information.
+ * @port_cfg: Port configuration information.
+ * @diffserv: DiffServ priority settings. Possible values come from the 6-bit
+ * ToS field (bit7 ~ bit2).
+ * @p_802_1p: 802.1p priority settings. Possible values come from the 3-bit
+ * 802.1p tag priority field.
+ * @br_addr: Bridge address. Used for STP.
+ * @other_addr: Other MAC address. Used for multiple network device mode.
+ * @broad_per: Broadcast storm percentage.
+ * @member: Current port membership. Used for STP.
+ */
+struct ksz_switch {
+ struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
+ struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
+ struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
+
+ u8 diffserv[DIFFSERV_ENTRIES];
+ u8 p_802_1p[PRIO_802_1P_ENTRIES];
+
+ u8 br_addr[MAC_ADDR_LEN];
+ u8 other_addr[MAC_ADDR_LEN];
+
+ u8 broad_per;
+ u8 member;
+};
+
+#define TX_RATE_UNIT 10000
+
+/**
+ * struct ksz_port_info - Port information data structure
+ * @state: Connection status of the port.
+ * @tx_rate: Transmit rate; divide by 10000 to get the rate in Mbit.
+ * @duplex: Duplex mode.
+ * @advertised: Advertised auto-negotiation setting. Used to determine link.
+ * @partner: Auto-negotiation partner setting. Used to determine link.
+ * @port_id: Port index to access actual hardware register.
+ * @pdev: Pointer to OS dependent network device.
+ */
+struct ksz_port_info {
+ uint state;
+ uint tx_rate;
+ u8 duplex;
+ u8 advertised;
+ u8 partner;
+ u8 port_id;
+ void *pdev;
+};
+
+#define MAX_TX_HELD_SIZE 52000
+
+/* Hardware features and bug fixes. */
+#define LINK_INT_WORKING (1 << 0)
+#define SMALL_PACKET_TX_BUG (1 << 1)
+#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
+#define IPV6_CSUM_GEN_HACK (1 << 3)
+#define RX_HUGE_FRAME (1 << 4)
+#define STP_SUPPORT (1 << 8)
+
+/* Software overrides. */
+#define PAUSE_FLOW_CTRL (1 << 0)
+#define FAST_AGING (1 << 1)
+
+/**
+ * struct ksz_hw - KSZ884X hardware data structure
+ * @io: Virtual address assigned.
+ * @ksz_switch: Pointer to KSZ8842 switch.
+ * @port_info: Port information.
+ * @port_mib: Port MIB information.
+ * @dev_count: Number of network devices this hardware supports.
+ * @dst_ports: Destination ports in switch for transmission.
+ * @id: Hardware ID. Used for display only.
+ * @mib_cnt: Number of MIB counters this hardware has.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @tx_cfg: Cached transmit control settings.
+ * @rx_cfg: Cached receive control settings.
+ * @intr_mask: Current interrupt mask.
+ * @intr_set: Current interrupt set.
+ * @intr_blocked: Interrupt blocked.
+ * @rx_desc_info: Receive descriptor information.
+ * @tx_desc_info: Transmit descriptor information.
+ * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
+ * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
+ * @tx_size: Transmit data size. Used for TX optimization.
+ * The maximum is defined by MAX_TX_HELD_SIZE.
+ * @perm_addr: Permanent MAC address.
+ * @override_addr: Overridden MAC address.
+ * @address: Additional MAC address entries.
+ * @addr_list_size: Additional MAC address list size.
+ * @mac_override: Indication that the MAC address has been overridden.
+ * @promiscuous: Counter to keep track of promiscuous mode set.
+ * @all_multi: Counter to keep track of all multicast mode set.
+ * @multi_list: Multicast address entries.
+ * @multi_bits: Cached multicast hash table settings.
+ * @multi_list_size: Multicast address list size.
+ * @enabled: Indication of hardware enabled.
+ * @rx_stop: Indication of receive process stop.
+ * @features: Hardware features to enable.
+ * @overrides: Hardware features to override.
+ * @parent: Pointer to parent, network device private structure.
+ */
+struct ksz_hw {
+ void __iomem *io;
+
+ struct ksz_switch *ksz_switch;
+ struct ksz_port_info port_info[SWITCH_PORT_NUM];
+ struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
+ int dev_count;
+ int dst_ports;
+ int id;
+ int mib_cnt;
+ int mib_port_cnt;
+
+ u32 tx_cfg;
+ u32 rx_cfg;
+ u32 intr_mask;
+ u32 intr_set;
+ uint intr_blocked;
+
+ struct ksz_desc_info rx_desc_info;
+ struct ksz_desc_info tx_desc_info;
+
+ int tx_int_cnt;
+ int tx_int_mask;
+ int tx_size;
+
+ u8 perm_addr[MAC_ADDR_LEN];
+ u8 override_addr[MAC_ADDR_LEN];
+ u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 addr_list_size;
+ u8 mac_override;
+ u8 promiscuous;
+ u8 all_multi;
+ u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_bits[HW_MULTICAST_SIZE];
+ u8 multi_list_size;
+
+ u8 enabled;
+ u8 rx_stop;
+ u8 reserved2[1];
+
+ uint features;
+ uint overrides;
+
+ void *parent;
+};
+
+enum {
+ PHY_NO_FLOW_CTRL,
+ PHY_FLOW_CTRL,
+ PHY_TX_ONLY,
+ PHY_RX_ONLY
+};
+
+/**
+ * struct ksz_port - Virtual port data structure
+ * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
+ * duplex, and 0 for auto, which normally results in full
+ * duplex.
+ * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
+ * 0 for auto, which normally results in 100 Mbit.
+ * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
+ * force.
+ * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
+ * control, and PHY_FLOW_CTRL for flow control.
+ * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
+ * Mbit PHY.
+ * @first_port: Index of first port this port supports.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @port_cnt: Number of ports this port supports.
+ * @counter: Port statistics counter.
+ * @hw: Pointer to hardware structure.
+ * @linked: Pointer to port information linked to this port.
+ */
+struct ksz_port {
+ u8 duplex;
+ u8 speed;
+ u8 force_link;
+ u8 flow_ctrl;
+
+ int first_port;
+ int mib_port_cnt;
+ int port_cnt;
+ u64 counter[OID_COUNTER_LAST];
+
+ struct ksz_hw *hw;
+ struct ksz_port_info *linked;
+};
+
+/**
+ * struct ksz_timer_info - Timer information data structure
+ * @timer: Kernel timer.
+ * @cnt: Running timer counter.
+ * @max: Number of times to run timer; -1 for infinity.
+ * @period: Timer period in jiffies.
+ */
+struct ksz_timer_info {
+ struct timer_list timer;
+ int cnt;
+ int max;
+ int period;
+};
+
+/**
+ * struct ksz_shared_mem - OS dependent shared memory data structure
+ * @dma_addr: Physical DMA address allocated.
+ * @alloc_size: Allocation size.
+ * @phys: Actual physical address used.
+ * @alloc_virt: Virtual address allocated.
+ * @virt: Actual virtual address used.
+ */
+struct ksz_shared_mem {
+ dma_addr_t dma_addr;
+ uint alloc_size;
+ uint phys;
+ u8 *alloc_virt;
+ u8 *virt;
+};
+
+/**
+ * struct ksz_counter_info - OS dependent counter information data structure
+ * @counter: Wait queue to wake up after counters are read.
+ * @time: Next time in jiffies to read counter.
+ * @read: Indication of whether the counters were read in full or not.
+ */
+struct ksz_counter_info {
+ wait_queue_head_t counter;
+ unsigned long time;
+ int read;
+};
+
+/**
+ * struct dev_info - Network device information data structure
+ * @dev: Pointer to network device.
+ * @pdev: Pointer to PCI device.
+ * @hw: Hardware structure.
+ * @desc_pool: Physical memory used for descriptor pool.
+ * @hwlock: Spinlock protecting hardware register access.
+ * @lock: Mutex protecting device access.
+ * @dev_rcv: Receive process function used.
+ * @last_skb: Socket buffer allocated for descriptor rx fragments.
+ * @skb_index: Buffer index for receiving fragments.
+ * @skb_len: Buffer length for receiving fragments.
+ * @mib_read: Workqueue to read MIB counters.
+ * @mib_timer_info: Timer to read MIB counters.
+ * @counter: Used for MIB reading.
+ * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
+ * the maximum is MAX_RX_BUF_SIZE.
+ * @opened: Counter to keep track of device open.
+ * @rx_tasklet: Receive processing tasklet.
+ * @tx_tasklet: Transmit processing tasklet.
+ * @wol_enable: Wake-on-LAN enable set by ethtool.
+ * @wol_support: Wake-on-LAN support used by ethtool.
+ * @pme_wait: Used for KSZ8841 power management.
+ */
+struct dev_info {
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct ksz_hw hw;
+ struct ksz_shared_mem desc_pool;
+
+ spinlock_t hwlock;
+ struct mutex lock;
+
+ int (*dev_rcv)(struct dev_info *);
+
+ struct sk_buff *last_skb;
+ int skb_index;
+ int skb_len;
+
+ struct work_struct mib_read;
+ struct ksz_timer_info mib_timer_info;
+ struct ksz_counter_info counter[TOTAL_PORT_NUM];
+
+ int mtu;
+ int opened;
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ int wol_enable;
+ int wol_support;
+ unsigned long pme_wait;
+};
+
+/**
+ * struct dev_priv - Network device private data structure
+ * @adapter: Adapter device information.
+ * @port: Port information.
+ * @monitor_timer_info: Timer to monitor ports.
+ * @stats: Network statistics.
+ * @proc_sem: Semaphore for proc access.
+ * @id: Device ID.
+ * @mii_if: MII interface information.
+ * @advertising: Temporary variable to store advertised settings.
+ * @msg_enable: The message flags controlling driver output.
+ * @media_state: The connection status of the device.
+ * @multicast: The all multicast state of the device.
+ * @promiscuous: The promiscuous state of the device.
+ */
+struct dev_priv {
+ struct dev_info *adapter;
+ struct ksz_port port;
+ struct ksz_timer_info monitor_timer_info;
+ struct net_device_stats stats;
+
+ struct semaphore proc_sem;
+ int id;
+
+ struct mii_if_info mii_if;
+ u32 advertising;
+
+ u32 msg_enable;
+ int media_state;
+ int multicast;
+ int promiscuous;
+};
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define DRV_NAME "KSZ884X PCI"
+#define DEVICE_NAME "KSZ884x PCI"
+#define DRV_VERSION "1.0.0"
+#define DRV_RELDATE "Feb 8, 2010"
+
+static char version[] __devinitdata =
+ "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
+
+static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
+
+/*
+ * Interrupt processing primary routines
+ */
+
+static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
+{
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
+}
+
+static inline void hw_dis_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = hw->intr_mask;
+ writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
+{
+ hw->intr_set = interrupt;
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_ena_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = 0;
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
+{
+ hw->intr_mask &= ~(bit);
+}
+
+static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr & ~interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw_dis_intr_bit(hw, interrupt);
+}
+
+/**
+ * hw_turn_on_intr - turn on specified interrupts
+ * @hw: The hardware instance.
+ * @bit: The interrupt bits to be on.
+ *
+ * This routine turns on the specified interrupts in the interrupt mask so that
+ * those interrupts will be enabled.
+ */
+static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
+{
+ hw->intr_mask |= bit;
+
+ if (!hw->intr_blocked)
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr | interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
+{
+ *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
+ *status = *status & hw->intr_set;
+}
+
+static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
+{
+ if (interrupt)
+ hw_ena_intr(hw);
+}
+
+/**
+ * hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
+ *
+ * This function blocks all interrupts of the hardware and returns the current
+ * interrupt enable mask so that interrupts can be restored later.
+ *
+ * Return the current interrupt enable mask.
+ */
+static uint hw_block_intr(struct ksz_hw *hw)
+{
+ uint interrupt = 0;
+
+ if (!hw->intr_blocked) {
+ hw_dis_intr(hw);
+ interrupt = hw->intr_blocked;
+ }
+ return interrupt;
+}
+
+/*
+ * Hardware descriptor routines
+ */
+
+static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
+{
+ status.rx.hw_owned = 0;
+ desc->phw->ctrl.data = cpu_to_le32(status.data);
+}
+
+static inline void release_desc(struct ksz_desc *desc)
+{
+ desc->sw.ctrl.tx.hw_owned = 1;
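+ /* Rewrite the buffer control word only when it has changed. */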
+ if (desc->sw.buf_size != desc->sw.buf.data) {
+ desc->sw.buf_size = desc->sw.buf.data;
+ desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
+ }
+ desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
+}
+
+static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->last];
+ info->last++;
+ info->last &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
+}
+
+static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_rx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.rx.buf_size = len;
+}
+
+static inline void get_tx_pkt(struct ksz_desc_info *info,
+ struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->next];
+ info->next++;
+ info->next &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
+}
+
+static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_tx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.tx.buf_size = len;
+}
+
+/* Switch functions */
+
+#define TABLE_READ 0x10
+#define TABLE_SEL_SHIFT 2
+
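+/* Dummy read back so the preceding register write has time to take effect. */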
+#define HW_DELAY(hw, reg) \
+ do { \
+ u16 dummy; \
+ dummy = readw(hw->io + reg); \
+ } while (0)
+
+/**
+ * sw_r_table - read 4 bytes of data from switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data: Buffer to store the read data.
+ *
+ * This routine reads 4 bytes of data from the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_table_64 - write 8 bytes of data to the switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data_hi: The high part of data to be written (bit63 ~ bit32).
+ * @data_lo: The low part of data to be written (bit31 ~ bit0).
+ *
+ * This routine writes 8 bytes of data to the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of written data.
+ */
+static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
+ u32 data_lo)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
+ writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_sta_mac_table - write to the static MAC table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @mac_addr: The MAC address.
+ * @ports: The port members.
+ * @override: The flag to override the port receive/transmit settings.
+ * @valid: The flag to indicate entry is valid.
+ * @use_fid: The flag to indicate the FID is valid.
+ * @fid: The FID value.
+ *
+ * This routine writes an entry of the static MAC table of the switch. It
+ * calls sw_w_table_64() to write the data.
+ */
+static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
+ u8 ports, int override, int valid, int use_fid, u8 fid)
+{
+ u32 data_hi;
+ u32 data_lo;
+
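+ /* Pack the 6-byte MAC address into the 64-bit static MAC table entry. */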
+ data_lo = ((u32) mac_addr[2] << 24) |
+ ((u32) mac_addr[3] << 16) |
+ ((u32) mac_addr[4] << 8) | mac_addr[5];
+ data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
+ data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
+
+ if (override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
+ }
+ if (valid)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+
+ sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
+}
+
+/**
+ * sw_r_vlan_table - read from the VLAN table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @vid: Buffer to store the VID.
+ * @fid: Buffer to store the FID.
+ * @member: Buffer to store the port membership.
+ *
+ * This function reads an entry of the VLAN table of the switch. It calls
+ * sw_r_table() to get the data.
+ *
+ * Return 0 if the entry is valid; otherwise -1.
+ */
+static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
+ u8 *member)
+{
+ u32 data;
+
+ sw_r_table(hw, TABLE_VLAN, addr, &data);
+ if (data & VLAN_TABLE_VALID) {
+ *vid = (u16)(data & VLAN_TABLE_VID);
+ *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
+ *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
+ VLAN_TABLE_MEMBERSHIP_SHIFT);
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * port_r_mib_cnt - read MIB counter
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the counter.
+ * @cnt: Buffer to store the counter.
+ *
+ * This routine reads a MIB counter of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
+{
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int timeout;
+
+ ctrl_addr = addr + PORT_COUNTER_NUM * port;
+
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
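+ /* Poll until the hardware reports a valid counter value. */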
+ for (timeout = 100; timeout > 0; timeout--) {
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ if (data & MIB_COUNTER_VALID) {
+ if (data & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * port_r_mib_pkt - read dropped packet counts
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @last: Buffer holding the last read dropped packet counts.
+ * @cnt: Buffer to store the receive and transmit dropped packet counts.
+ *
+ * This routine reads the dropped packet counts of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
+{
+ u32 cur;
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int index;
+
+ index = KS_MIB_PACKET_DROPPED_RX_0 + port;
+ do {
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr = (u16) index;
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
+ << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+
+ data &= MIB_PACKET_DROPPED;
+ cur = *last;
+ if (data != cur) {
+ *last = data;
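+ /* Handle wrap-around of the 16-bit dropped packet counter. */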
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+ ++last;
+ ++cnt;
+ index -= KS_MIB_PACKET_DROPPED_TX -
+ KS_MIB_PACKET_DROPPED_TX_0 + 1;
+ } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
+}
+
+/**
+ * port_r_cnt - read MIB counters periodically
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to read the counters of the port periodically to avoid
+ * counter overflow. The hardware should be acquired first before calling this
+ * routine.
+ *
+ * Return non-zero when not all counters are read.
+ */
+static int port_r_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ while (mib->cnt_ptr < PORT_COUNTER_NUM) {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ mib->cnt_ptr = 0;
+ return 0;
+}
+
+/**
+ * port_init_cnt - initialize MIB counter values
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to initialize all counters to zero if the hardware
+ * cannot do it after reset.
+ */
+static void port_init_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ mib->cnt_ptr = 0;
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ do {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ } while (mib->cnt_ptr < PORT_COUNTER_NUM);
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
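+ /* The counts read above are discarded; start all software counters from zero. */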
+ memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ mib->cnt_ptr = 0;
+}
+
+/*
+ * Port functions
+ */
+
+/**
+ * port_chk - check port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the port register are set
+ * or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * port_cfg - set port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This routine sets or resets the specified bits of the port register.
+ */
+static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
+ int set)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_chk_shift - check port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ *
+ * This function checks whether the specified port is set in the register or
+ * not.
+ *
+ * Return 0 if the port is not set.
+ */
+static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
+{
+ u16 data;
+ u16 bit = 1 << port;
+
+ data = readw(hw->io + addr);
+ data >>= shift;
+ return (data & bit) == bit;
+}
+
+/**
+ * port_cfg_shift - set port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ * @set: The flag indicating whether the port is to be set or not.
+ *
+ * This routine sets or resets the specified port in the register.
+ */
+static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
+ int set)
+{
+ u16 data;
+ u16 bits = 1 << port;
+
+ data = readw(hw->io + addr);
+ bits <<= shift;
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_r8 - read byte from port register
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a byte from the port register.
+ */
+static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readb(hw->io + addr);
+}
+
+/**
+ * port_r16 - read word from port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a word from the port register.
+ */
+static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readw(hw->io + addr);
+}
+
+/**
+ * port_w16 - write word to port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Data to write.
+ *
+ * This routine writes a word to the port register.
+ */
+static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * sw_chk - check switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the switch register are
+ * set or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * sw_cfg - set switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This function sets or resets the specified bits of the switch register.
+ */
+static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/* Bandwidth */
+
+static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
+}
+
+static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
+}
+
+/* The driver sets the switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROTECTION_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
+
+/**
+ * sw_cfg_broad_storm - configure broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ */
+static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ u16 data;
+ u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
+
+ if (value > BROADCAST_STORM_RATE)
+ value = BROADCAST_STORM_RATE;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
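+ /* The rate value is stored byte swapped across the HI/LO register fields. */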
+ data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+}
+
+/**
+ * sw_get_broad_storm - get broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Buffer to store the broadcast storm threshold percentage.
+ *
+ * This routine retrieves the broadcast storm threshold of the switch.
+ */
+static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
+{
+ int num;
+ u16 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ num = (data & BROADCAST_STORM_RATE_HI);
+ num <<= 8;
+ num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
+ num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
+ *percent = (u8) num;
+}
+
+/**
+ * sw_dis_broad_storm - disable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the broadcast storm limit function of the switch.
+ */
+static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
+{
+ port_cfg_broad_storm(hw, port, 0);
+}
+
+/**
+ * sw_ena_broad_storm - enable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine enables the broadcast storm limit function of the switch.
+ */
+static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
+{
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ port_cfg_broad_storm(hw, port, 1);
+}
+
+/**
+ * sw_init_broad_storm - initialize broadcast storm
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the broadcast storm limit function of the switch.
+ */
+static void sw_init_broad_storm(struct ksz_hw *hw)
+{
+ int port;
+
+ hw->ksz_switch->broad_per = 1;
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ for (port = 0; port < TOTAL_PORT_NUM; port++)
+ sw_dis_broad_storm(hw, port);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
+}
+
+/**
+ * hw_cfg_broad_storm - configure broadcast storm
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ * It is called by user functions. The hardware should be acquired first.
+ */
+static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ if (percent > 100)
+ percent = 100;
+
+ sw_cfg_broad_storm(hw, percent);
+ sw_get_broad_storm(hw, &percent);
+ hw->ksz_switch->broad_per = percent;
+}
+
+/**
+ * sw_dis_prio_rate - disable switch priority rate
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the priority rate function of the switch.
+ */
+static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_IN_RATE_OFFSET;
+ writel(0, hw->io + addr);
+}
+
+/**
+ * sw_init_prio_rate - initialize switch priority rate
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the priority rate function of the switch.
+ */
+static void sw_init_prio_rate(struct ksz_hw *hw)
+{
+ int port;
+ int prio;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ for (prio = 0; prio < PRIO_QUEUES; prio++) {
+ sw->port_cfg[port].rx_rate[prio] =
+ sw->port_cfg[port].tx_rate[prio] = 0;
+ }
+ sw_dis_prio_rate(hw, port);
+ }
+}
+
+/* Communication */
+
+static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
+}
+
+static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
+}
+
+static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
+}
+
+static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
+}
+
+/* Spanning Tree */
+
+static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
+}
+
+static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
+}
+
+static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
+}
+
+static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
+}
+
+static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
+{
+ if (!(hw->overrides & FAST_AGING)) {
+ sw_cfg_fast_aging(hw, 1);
+ mdelay(1);
+ sw_cfg_fast_aging(hw, 0);
+ }
+}
+
+/* VLAN */
+
+static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
+}
+
+static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
+}
+
+static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
+}
+
+static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
+}
+
+static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
+}
+
+static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
+}
+
+static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
+}
+
+static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
+}
+
+/* Mirroring */
+
+static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
+}
+
+static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
+}
+
+static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
+}
+
+static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
+}
+
+static void sw_init_mirror(struct ksz_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_cfg_mirror_sniffer(hw, port, 0);
+ port_cfg_mirror_rx(hw, port, 0);
+ port_cfg_mirror_tx(hw, port, 0);
+ }
+ sw_cfg_mirror_rx_tx(hw, 0);
+}
+
+static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE, set);
+}
+
+static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
+{
+ return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE);
+}
+
+static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
+}
+
+static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
+{
+ return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
+}
+
+/* Priority */
+
+static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
+}
+
+static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
+}
+
+static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
+}
+
+static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
+}
+
+static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
+}
+
+static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
+}
+
+static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
+}
+
+static inline int port_chk_prio(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
+}
+
+/**
+ * sw_dis_diffserv - disable switch DiffServ priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the DiffServ priority function of the switch.
+ */
+static void sw_dis_diffserv(struct ksz_hw *hw, int port)
+{
+ port_cfg_diffserv(hw, port, 0);
+}
+
+/**
+ * sw_dis_802_1p - disable switch 802.1p priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the 802.1p priority function of the switch.
+ */
+static void sw_dis_802_1p(struct ksz_hw *hw, int port)
+{
+ port_cfg_802_1p(hw, port, 0);
+}
+
+/**
+ * sw_cfg_replace_null_vid - enable switch null VID replacement
+ * @hw: The hardware instance.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables or disables the null VID replacement function of the
+ * switch.
+ */
+static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
+}
+
+/**
+ * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables the 802.1p priority re-mapping function of the switch.
+ * This allows the 802.1p priority field to be replaced with the port's default
+ * tag priority value when the ingress packet's 802.1p priority is higher than
+ * the port's default tag priority.
+ */
+static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_replace_vid(hw, port, set);
+}
+
+/**
+ * sw_cfg_port_based - configure switch port based priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @prio: The priority to set.
+ *
+ * This routine configures the port based priority of the switch.
+ */
+static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
+{
+ u16 data;
+
+ if (prio > PORT_BASED_PRIORITY_BASE)
+ prio = PORT_BASED_PRIORITY_BASE;
+
+ hw->ksz_switch->port_cfg[port].port_prio = prio;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
+ data &= ~PORT_BASED_PRIORITY_MASK;
+ data |= prio << PORT_BASED_PRIORITY_SHIFT;
+ port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
+}
+
+/**
+ * sw_dis_multi_queue - disable transmit multiple queues
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the transmit multiple queues selection of the switch
+ * port, leaving only a single transmit queue on the port.
+ */
+static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
+{
+ port_cfg_prio(hw, port, 0);
+}
+
+/**
+ * sw_init_prio - initialize switch priority
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the switch QoS priority functions.
+ */
+static void sw_init_prio(struct ksz_hw *hw)
+{
+ int port;
+ int tos;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /*
+ * Init all the 802.1p tag priority value to be assigned to different
+ * priority queue.
+ */
+ sw->p_802_1p[0] = 0;
+ sw->p_802_1p[1] = 0;
+ sw->p_802_1p[2] = 1;
+ sw->p_802_1p[3] = 1;
+ sw->p_802_1p[4] = 2;
+ sw->p_802_1p[5] = 2;
+ sw->p_802_1p[6] = 3;
+ sw->p_802_1p[7] = 3;
+
+ /*
+ * Init all the DiffServ priority value to be assigned to priority
+ * queue 0.
+ */
+ for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
+ sw->diffserv[tos] = 0;
+
+ /* All QoS functions disabled. */
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ sw_dis_multi_queue(hw, port);
+ sw_dis_diffserv(hw, port);
+ sw_dis_802_1p(hw, port);
+ sw_cfg_replace_vid(hw, port, 0);
+
+ sw->port_cfg[port].port_prio = 0;
+ sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
+ }
+ sw_cfg_replace_null_vid(hw, 0);
+}
+
+/**
+ * port_get_def_vid - get port default VID.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @vid: Buffer to store the VID.
+ *
+ * This routine retrieves the default VID of the port.
+ */
+static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_VID_OFFSET;
+ *vid = readw(hw->io + addr);
+}
+
+/**
+ * sw_init_vlan - initialize switch VLAN
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the VLAN function of the switch.
+ */
+static void sw_init_vlan(struct ksz_hw *hw)
+{
+ int port;
+ int entry;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* Read 16 VLAN entries from device's VLAN table. */
+ for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
+ sw_r_vlan_table(hw, entry,
+ &sw->vlan_table[entry].vid,
+ &sw->vlan_table[entry].fid,
+ &sw->vlan_table[entry].member);
+ }
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
+ sw->port_cfg[port].member = PORT_MASK;
+ }
+}
+
+/**
+ * sw_cfg_port_base_vlan - configure port-based VLAN membership
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @member: The port-based VLAN membership.
+ *
+ * This routine configures the port-based VLAN membership of the port.
+ */
+static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
+{
+ u32 addr;
+ u8 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_2_OFFSET;
+
+ data = readb(hw->io + addr);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & PORT_MASK);
+ writeb(data, hw->io + addr);
+
+ hw->ksz_switch->port_cfg[port].member = member;
+}
+
+/**
+ * sw_get_addr - get the switch MAC address.
+ * @hw: The hardware instance.
+ * @mac_addr: Buffer to store the MAC address.
+ *
+ * This function retrieves the MAC address of the switch.
+ */
+static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_addr - configure switch MAC address
+ * @hw: The hardware instance.
+ * @mac_addr: The MAC address.
+ *
+ * This function configures the MAC address of the switch.
+ */
+static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_global_ctrl - set switch global control
+ * @hw: The hardware instance.
+ *
+ * This routine sets the global control of the switch function.
+ */
+static void sw_set_global_ctrl(struct ksz_hw *hw)
+{
+ u16 data;
+
+ /* Enable switch MII flow control. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data |= SWITCH_FLOW_CTRL;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ data |= SWITCH_AGGR_BACKOFF;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ data |= SWITCH_AGING_ENABLE;
+ data |= SWITCH_LINK_AUTO_AGING;
+
+ if (hw->overrides & FAST_AGING)
+ data |= SWITCH_FAST_AGING;
+ else
+ data &= ~SWITCH_FAST_AGING;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+
+ /* Enable no excessive collision drop. */
+ data |= NO_EXC_COLLISION_DROP;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+}
+
+enum {
+ STP_STATE_DISABLED = 0,
+ STP_STATE_LISTENING,
+ STP_STATE_LEARNING,
+ STP_STATE_FORWARDING,
+ STP_STATE_BLOCKED,
+ STP_STATE_SIMPLE
+};
+
+/**
+ * port_set_stp_state - configure port spanning tree state
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @state: The spanning tree state.
+ *
+ * This routine configures the spanning tree state of the port.
+ */
+static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
+{
+ u16 data;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
+ switch (state) {
+ case STP_STATE_DISABLED:
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LISTENING:
+/*
+ * No need to turn on transmit because of port direct mode.
+ * Turning on receive is required if the static MAC table is not set up.
+ */
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LEARNING:
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_BLOCKED:
+/*
+ * Need to set up the static MAC table with override to keep receiving BPDU
+ * messages. See sw_init_stp routine.
+ */
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_SIMPLE:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ }
+ port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
+ hw->ksz_switch->port_cfg[port].stp_state = state;
+}
+
+#define STP_ENTRY 0
+#define BROADCAST_ENTRY 1
+#define BRIDGE_ADDR_ENTRY 2
+#define IPV6_ADDR_ENTRY 3
+
+/**
+ * sw_clr_sta_mac_table - clear static MAC table
+ * @hw: The hardware instance.
+ *
+ * This routine clears the static MAC table.
+ */
+static void sw_clr_sta_mac_table(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, 0,
+ entry->use_fid, entry->fid);
+ }
+}
+
+/**
+ * sw_init_stp - initialize switch spanning tree support
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the spanning tree support of the switch.
+ */
+static void sw_init_stp(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+
+ entry = &hw->ksz_switch->mac_table[STP_ENTRY];
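+ /* 01:80:C2:00:00:00 is the reserved bridge group address used for BPDUs. */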
+ entry->mac_addr[0] = 0x01;
+ entry->mac_addr[1] = 0x80;
+ entry->mac_addr[2] = 0xC2;
+ entry->mac_addr[3] = 0x00;
+ entry->mac_addr[4] = 0x00;
+ entry->mac_addr[5] = 0x00;
+ entry->ports = HOST_MASK;
+ entry->override = 1;
+ entry->valid = 1;
+ sw_w_sta_mac_table(hw, STP_ENTRY,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+}
+
+/**
+ * sw_block_addr - block certain packets from the host port
+ * @hw: The hardware instance.
+ *
+ * This routine blocks certain packets from reaching the host port.
+ */
+static void sw_block_addr(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ entry->valid = 0;
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+ }
+}
+
+#define PHY_LINK_SUPPORT \
+ (PHY_AUTO_NEG_ASYM_PAUSE | \
+ PHY_AUTO_NEG_SYM_PAUSE | \
+ PHY_AUTO_NEG_100BT4 | \
+ PHY_AUTO_NEG_100BTX_FD | \
+ PHY_AUTO_NEG_100BTX | \
+ PHY_AUTO_NEG_10BT_FD | \
+ PHY_AUTO_NEG_10BT)
+
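+/*
+ * Inline helpers to read and write the memory-mapped PHY registers at the
+ * given PHY base offset.
+ */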
+static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
+}
+
+static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
+}
+
+static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+/**
+ * hw_r_phy - read data from PHY register
+ * @hw: The hardware instance.
+ * @port: Port to read.
+ * @reg: PHY register to read.
+ * @val: Buffer to store the read data.
+ *
+ * This routine reads data from the PHY register.
+ */
+static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ *val = readw(hw->io + phy);
+}
+
+/**
+ * hw_w_phy - write data to PHY register
+ * @hw: The hardware instance.
+ * @port: Port to write.
+ * @reg: PHY register to write.
+ * @val: Word data to write.
+ *
+ * This routine writes data to the PHY register.
+ */
+static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ writew(val, hw->io + phy);
+}
+
+/*
+ * EEPROM access functions
+ */
+
+#define AT93C_CODE 0
+#define AT93C_WR_OFF 0x00
+#define AT93C_WR_ALL 0x10
+#define AT93C_ER_ALL 0x20
+#define AT93C_WR_ON 0x30
+
+#define AT93C_WRITE 1
+#define AT93C_READ 2
+#define AT93C_ERASE 3
+
+#define EEPROM_DELAY 4
+
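+/*
+ * GPIO bit-bang helpers used to drive the serial interface of the AT93C46
+ * EEPROM through the EEPROM control register.
+ */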
+static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data &= ~gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data |= gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ return (u8)(data & gpio);
+}
+
+static void eeprom_clk(struct ksz_hw *hw)
+{
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+}
+
+static u16 spi_r(struct ksz_hw *hw)
+{
+ int i;
+ u16 temp = 0;
+
+ for (i = 15; i >= 0; i--) {
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+
+ temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
+
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ }
+ return temp;
+}
+
+static void spi_w(struct ksz_hw *hw, u16 data)
+{
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
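+/* Send the start bit, the 2-bit opcode, and the 6-bit register address. */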
+static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
+{
+ int i;
+
+ /* Initial start bit */
+ raise_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+
+ /* AT93C operation */
+ for (i = 1; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+
+ /* Address location */
+ for (i = 5; i >= 0; i--) {
+ (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+#define EEPROM_DATA_RESERVED 0
+#define EEPROM_DATA_MAC_ADDR_0 1
+#define EEPROM_DATA_MAC_ADDR_1 2
+#define EEPROM_DATA_MAC_ADDR_2 3
+#define EEPROM_DATA_SUBSYS_ID 4
+#define EEPROM_DATA_SUBSYS_VEN_ID 5
+#define EEPROM_DATA_PM_CAP 6
+
+/* User defined EEPROM data */
+#define EEPROM_DATA_OTHER_MAC_ADDR 9
+
+/**
+ * eeprom_read - read from AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ *
+ * This function reads a word from the AT93C46 EEPROM.
+ *
+ * Return the data value.
+ */
+static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
+{
+ u16 data;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ spi_reg(hw, AT93C_READ, reg);
+ data = spi_r(hw);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ return data;
+}
+
+/**
+ * eeprom_write - write to AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ * @data: The data value.
+ *
+ * This procedure writes a word to the AT93C46 EEPROM.
+ */
+static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
+{
+ int timeout;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ /* Enable write. */
+ spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Erase the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_ERASE, reg);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Wait for the erase operation to complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Write the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_WRITE, reg);
+ spi_w(hw, data);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Wait for the write operation to complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Disable write. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+}
+
+/*
+ * Link detection routines
+ */
+
+static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
+{
+ ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
+ switch (port->flow_ctrl) {
+ case PHY_FLOW_CTRL:
+ ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
+ break;
+ /* Not supported. */
+ case PHY_TX_ONLY:
+ case PHY_RX_ONLY:
+ default:
+ break;
+ }
+ return ctrl;
+}
+
+static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
+{
+ u32 rx_cfg;
+ u32 tx_cfg;
+
+ rx_cfg = hw->rx_cfg;
+ tx_cfg = hw->tx_cfg;
+ if (rx)
+ hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
+ else
+ hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
+ if (tx)
+ hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
+ else
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled) {
+ if (rx_cfg != hw->rx_cfg)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ if (tx_cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
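+/*
+ * Resolve receive and transmit flow control from the local and remote
+ * auto-negotiation pause advertisements, unless flow control is already
+ * forced through an override.
+ */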
+static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
+ u16 local, u16 remote)
+{
+ int rx;
+ int tx;
+
+ if (hw->overrides & PAUSE_FLOW_CTRL)
+ return;
+
+ rx = tx = 0;
+ if (port->force_link)
+ rx = tx = 1;
+ if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ rx = tx = 1;
+ } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
+ (local & PHY_AUTO_NEG_PAUSE) ==
+ PHY_AUTO_NEG_ASYM_PAUSE) {
+ tx = 1;
+ }
+ } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
+ if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ rx = 1;
+ }
+ if (!hw->ksz_switch)
+ set_flow_ctrl(hw, rx, tx);
+}
+
+static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
+ struct ksz_port_info *info, u16 link_status)
+{
+ if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
+ !(hw->overrides & PAUSE_FLOW_CTRL)) {
+ u32 cfg = hw->tx_cfg;
+
+ /* Disable flow control in the half duplex mode. */
+ if (1 == info->duplex)
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled && cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+/**
+ * port_get_link_speed - get current link status
+ * @port: The port instance.
+ *
+ * This routine reads PHY registers to determine the current link status of the
+ * switch ports.
+ */
+static void port_get_link_speed(struct ksz_port *port)
+{
+ uint interrupt;
+ struct ksz_port_info *info;
+ struct ksz_port_info *linked = NULL;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 status;
+ u8 local;
+ u8 remote;
+ int i;
+ int p;
+ int change = 0;
+
+ interrupt = hw_block_intr(hw);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ /*
+ * Link status is changing all the time even when there is no
+ * cable connection!
+ */
+ remote = status & (PORT_AUTO_NEG_COMPLETE |
+ PORT_STATUS_LINK_GOOD);
+ local = (u8) data;
+
+ /* No change to status. */
+ if (local == info->advertised && remote == info->partner)
+ continue;
+
+ info->advertised = local;
+ info->partner = remote;
+ if (status & PORT_STATUS_LINK_GOOD) {
+
+ /* Remember the first linked port. */
+ if (!linked)
+ linked = info;
+
+ info->tx_rate = 10 * TX_RATE_UNIT;
+ if (status & PORT_STATUS_SPEED_100MBIT)
+ info->tx_rate = 100 * TX_RATE_UNIT;
+
+ info->duplex = 1;
+ if (status & PORT_STATUS_FULL_DUPLEX)
+ info->duplex = 2;
+
+ if (media_connected != info->state) {
+ hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
+ &data);
+ hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
+ &status);
+ determine_flow_ctrl(hw, port, data, status);
+ if (hw->ksz_switch) {
+ port_cfg_back_pressure(hw, p,
+ (1 == info->duplex));
+ }
+ change |= 1 << i;
+ port_cfg_change(hw, port, info, status);
+ }
+ info->state = media_connected;
+ } else {
+ if (media_disconnected != info->state) {
+ change |= 1 << i;
+
+ /* Indicate the link just went down. */
+ hw->port_mib[p].link_down = 1;
+ }
+ info->state = media_disconnected;
+ }
+ hw->port_mib[p].state = (u8) info->state;
+ }
+
+ if (linked && media_disconnected == port->linked->state)
+ port->linked = linked;
+
+ hw_restore_intr(hw, interrupt);
+}
+
+#define PHY_RESET_TIMEOUT 10
+
+/**
+ * port_set_link_speed - set port speed
+ * @port: The port instance.
+ *
+ * This routine sets the link speed of the switch ports.
+ */
+static void port_set_link_speed(struct ksz_port *port)
+{
+ struct ksz_port_info *info;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 cfg;
+ u8 status;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ cfg = 0;
+ if (status & PORT_STATUS_LINK_GOOD)
+ cfg = data;
+
+ data |= PORT_AUTO_NEG_ENABLE;
+ data = advertised_flow_ctrl(port, data);
+
+ data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
+
+ /* Check if manual configuration is specified by the user. */
+ if (port->speed || port->duplex) {
+ if (10 == port->speed)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX);
+ else if (100 == port->speed)
+ data &= ~(PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (1 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_10BT_FD);
+ else if (2 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT);
+ }
+ if (data != cfg) {
+ data |= PORT_AUTO_NEG_RESTART;
+ port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
+ }
+ }
+}
+
+/**
+ * port_force_link_speed - force port speed
+ * @port: The port instance.
+ *
+ * This routine forces the link speed of the switch ports.
+ */
+static void port_force_link_speed(struct ksz_port *port)
+{
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ int i;
+ int phy;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
+ hw_r_phy_ctrl(hw, phy, &data);
+
+ data &= ~PHY_AUTO_NEG_ENABLE;
+
+ if (10 == port->speed)
+ data &= ~PHY_SPEED_100MBIT;
+ else if (100 == port->speed)
+ data |= PHY_SPEED_100MBIT;
+ if (1 == port->duplex)
+ data &= ~PHY_FULL_DUPLEX;
+ else if (2 == port->duplex)
+ data |= PHY_FULL_DUPLEX;
+ hw_w_phy_ctrl(hw, phy, data);
+ }
+}
+
+static void port_set_power_saving(struct ksz_port *port, int enable)
+{
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
+ port_cfg(hw, p,
+ KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
+}
+
+/*
+ * KSZ8841 power management functions
+ */
+
+/**
+ * hw_chk_wol_pme_status - check PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This function checks whether the PMEN pin is asserted.
+ *
+ * Return 1 if the PMEN pin is asserted; otherwise 0.
+ */
+static int hw_chk_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return 0;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
+}
+
+/**
+ * hw_clr_wol_pme_status - clear PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This routine is used to clear PME_Status to deassert the PMEN pin.
+ */
+static void hw_clr_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+
+ /* Clear PME_Status to deassert PMEN pin. */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data |= PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol_pme - enable or disable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable Wake-on-LAN.
+ */
+static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data &= ~PCI_PM_CTRL_STATE_MASK;
+ if (set)
+ data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
+ else
+ data &= ~PCI_PM_CTRL_PME_ENABLE;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol - configure Wake-on-LAN features
+ * @hw: The hardware instance.
+ * @frame: The pattern frame bit.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable certain Wake-on-LAN features.
+ */
+static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
+ if (set)
+ data |= frame;
+ else
+ data &= ~frame;
+ writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
+}
+
+/**
+ * hw_set_wol_frame - program Wake-on-LAN pattern
+ * @hw: The hardware instance.
+ * @i: The frame index.
+ * @mask_size: The size of the mask.
+ * @mask: Mask to ignore certain bytes in the pattern.
+ * @frame_size: The size of the frame.
+ * @pattern: The frame data.
+ *
+ * This routine is used to program a Wake-on-LAN pattern.
+ */
+static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
+ u8 *mask, uint frame_size, u8 *pattern)
+{
+ int bits;
+ int from;
+ int len;
+ int to;
+ u32 crc;
+ u8 data[64];
+ u8 val = 0;
+
+ if (frame_size > mask_size * 8)
+ frame_size = mask_size * 8;
+ if (frame_size > 64)
+ frame_size = 64;
+
+ i *= 0x10;
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
+
+ bits = len = from = to = 0;
+ do {
+ if (bits) {
+ if ((val & 1))
+ data[to++] = pattern[from];
+ val >>= 1;
+ ++from;
+ --bits;
+ } else {
+ val = mask[len];
+ writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ + len);
+ ++len;
+ if (val)
+ bits = 8;
+ else
+ from += 8;
+ }
+ } while (from < (int) frame_size);
+ if (val) {
+ bits = mask[len - 1];
+ val <<= (from % 8);
+ bits &= ~val;
+ writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
+ 1);
+ }
+ crc = ether_crc(to, data);
+ writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
+}
+
+/**
+ * hw_add_wol_arp - add ARP pattern
+ * @hw: The hardware instance.
+ * @ip_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to add an ARP pattern for waking up the host.
+ */
+static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+{
+ u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ u8 pattern[42] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x06,
+ 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[38], ip_addr, 4);
+ hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
+}
+
+/**
+ * hw_add_wol_bcast - add broadcast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a broadcast pattern for waking up the host.
+ */
+static void hw_add_wol_bcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+}
+
+/**
+ * hw_add_wol_mcast - add multicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a multicast pattern for waking up the host.
+ *
+ * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
+ * by the IPv6 ping command. Note that multicast packets are filtered through
+ * the multicast hash table, so not all multicast packets can wake up the host.
+ */
+static void hw_add_wol_mcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[3], &hw->override_addr[3], 3);
+ hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
+}
+
+/**
+ * hw_add_wol_ucast - add unicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a unicast pattern to wake up the host.
+ *
+ * It is assumed the unicast packet is directed to the device, as in the normal
+ * case the hardware can only receive packets addressed to it.
+ */
+static void hw_add_wol_ucast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+
+ hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+}
+
+/**
+ * hw_enable_wol - enable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @wol_enable: The Wake-on-LAN settings.
+ * @net_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to enable Wake-on-LAN depending on driver settings.
+ */
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+{
+ hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
+ hw_add_wol_ucast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
+ hw_add_wol_mcast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
+ hw_add_wol_arp(hw, net_addr);
+}
+
+/**
+ * hw_init - check that the driver matches the hardware
+ * @hw: The hardware instance.
+ *
+ * This function checks that the hardware is supported by this driver and sets
+ * the hardware up for proper initialization.
+ *
+ * Return the number of ports, or 0 if the chip is not recognized.
+ */
+static int hw_init(struct ksz_hw *hw)
+{
+ int rc = 0;
+ u16 data;
+ u16 revision;
+
+ /* Set bus speed to 125MHz. */
+ writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
+
+ /* Check KSZ884x chip ID. */
+ data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
+
+ revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
+ data &= KS884X_CHIP_ID_MASK_41;
+ if (REG_CHIP_ID_41 == data)
+ rc = 1;
+ else if (REG_CHIP_ID_42 == data)
+ rc = 2;
+ else
+ return 0;
+
+ /* Setup hardware features or bug workarounds. */
+ if (revision <= 1) {
+ hw->features |= SMALL_PACKET_TX_BUG;
+ if (1 == rc)
+ hw->features |= HALF_DUPLEX_SIGNAL_BUG;
+ }
+ hw->features |= IPV6_CSUM_GEN_HACK;
+ return rc;
+}
+
+/**
+ * hw_reset - reset the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine resets the hardware.
+ */
+static void hw_reset(struct ksz_hw *hw)
+{
+ writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+
+ /* Wait for device to reset. */
+ mdelay(10);
+
+ /* Write 0 to clear device reset. */
+ writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+}
+
+/**
+ * hw_setup - setup the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware for proper operation.
+ */
+static void hw_setup(struct ksz_hw *hw)
+{
+#if SET_DEFAULT_LED
+ u16 data;
+
+ /* Change default LED mode. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+ data &= ~LED_MODE;
+ data |= SET_DEFAULT_LED;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+#endif
+
+ /* Setup transmit control. */
+ hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
+
+ /* Setup receive control. */
+ hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
+ hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
+
+ /* Hardware cannot handle UDP packets in IP fragments. */
+ hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->all_multi)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ if (hw->promiscuous)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+}
+
+/**
+ * hw_setup_intr - setup interrupt mask
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the interrupt mask for proper operation.
+ */
+static void hw_setup_intr(struct ksz_hw *hw)
+{
+ hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
+}
+
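+/*
+ * Make sure the descriptor count is a power of two no smaller than
+ * (1 << MIN_DESC_SHIFT) so the ring index can wrap with a simple mask.
+ */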
+static void ksz_check_desc_num(struct ksz_desc_info *info)
+{
+#define MIN_DESC_SHIFT 2
+
+ int alloc = info->alloc;
+ int shift;
+
+ shift = 0;
+ while (!(alloc & 1)) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (alloc != 1 || shift < MIN_DESC_SHIFT) {
+ printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ while (alloc) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (shift < MIN_DESC_SHIFT)
+ shift = MIN_DESC_SHIFT;
+ alloc = 1 << shift;
+ info->alloc = alloc;
+ }
+ info->mask = info->alloc - 1;
+}
+
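+/**
+ * hw_init_desc - initialize the descriptor ring
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This routine attaches each software descriptor to its hardware descriptor
+ * and chains the hardware descriptors into a ring, with the last descriptor
+ * pointing back to the first and marked as end of ring.
+ */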
+static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ int i;
+ u32 phys = desc_info->ring_phys;
+ struct ksz_hw_desc *desc = desc_info->ring_virt;
+ struct ksz_desc *cur = desc_info->ring;
+ struct ksz_desc *previous = NULL;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ cur->phw = desc++;
+ phys += desc_info->size;
+ previous = cur++;
+ previous->phw->next = cpu_to_le32(phys);
+ }
+ previous->phw->next = cpu_to_le32(desc_info->ring_phys);
+ previous->sw.buf.rx.end_of_ring = 1;
+ previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
+
+ desc_info->avail = desc_info->alloc;
+ desc_info->last = desc_info->next = 0;
+
+ desc_info->cur = desc_info->ring;
+}
+
+/**
+ * hw_set_desc_base - set descriptor base addresses
+ * @hw: The hardware instance.
+ * @tx_addr: The transmit descriptor base.
+ * @rx_addr: The receive descriptor base.
+ *
+ * This routine programs the descriptor base addresses after reset.
+ */
+static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
+{
+ /* Set base address of Tx/Rx descriptors. */
+ writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
+ writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
+}
+
+static void hw_reset_pkts(struct ksz_desc_info *info)
+{
+ info->cur = info->ring;
+ info->avail = info->alloc;
+ info->last = info->next = 0;
+}
+
+static inline void hw_resume_rx(struct ksz_hw *hw)
+{
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+}
+
+/**
+ * hw_start_rx - start receiving
+ * @hw: The hardware instance.
+ *
+ * This routine starts the receive function of the hardware.
+ */
+static void hw_start_rx(struct ksz_hw *hw)
+{
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ /* Notify when the receive stops. */
+ hw->intr_mask |= KS884X_INT_RX_STOPPED;
+
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+ hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
+ hw->rx_stop++;
+
+ /* Handle counter wraparound. */
+ if (0 == hw->rx_stop)
+ hw->rx_stop = 2;
+}
+
+/**
+ * hw_stop_rx - stop receiving
+ * @hw: The hardware instance.
+ *
+ * This routine stops the receive function of the hardware.
+ */
+static void hw_stop_rx(struct ksz_hw *hw)
+{
+ hw->rx_stop = 0;
+ hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
+ writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
+}
+
+/**
+ * hw_start_tx - start transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine starts the transmit function of the hardware.
+ */
+static void hw_start_tx(struct ksz_hw *hw)
+{
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_stop_tx - stop transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine stops the transmit function of the hardware.
+ */
+static void hw_stop_tx(struct ksz_hw *hw)
+{
+ writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_disable - disable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine disables the hardware.
+ */
+static void hw_disable(struct ksz_hw *hw)
+{
+ hw_stop_rx(hw);
+ hw_stop_tx(hw);
+ hw->enabled = 0;
+}
+
+/**
+ * hw_enable - enable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine enables the hardware.
+ */
+static void hw_enable(struct ksz_hw *hw)
+{
+ hw_start_tx(hw);
+ hw_start_rx(hw);
+ hw->enabled = 1;
+}
+
+/**
+ * hw_alloc_pkt - allocate enough descriptors for transmission
+ * @hw: The hardware instance.
+ * @length: The length of the packet.
+ * @physical: Number of descriptors required.
+ *
+ * This function allocates descriptors for transmission.
+ *
+ * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
+ */
+static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
+{
+ /* Always leave one descriptor free. */
+ if (hw->tx_desc_info.avail <= 1)
+ return 0;
+
+ /* Allocate a descriptor for transmission and mark it current. */
+ get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
+ hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
+
+ /* Keep track of number of transmit descriptors used so far. */
+ ++hw->tx_int_cnt;
+ hw->tx_size += length;
+
+ /* Cannot hold on too much data. */
+ if (hw->tx_size >= MAX_TX_HELD_SIZE)
+ hw->tx_int_cnt = hw->tx_int_mask + 1;
+
+ if (physical > hw->tx_desc_info.avail)
+ return 1;
+
+ return hw->tx_desc_info.avail;
+}
+
+/**
+ * hw_send_pkt - mark packet for transmission
+ * @hw: The hardware instance.
+ *
+ * This routine marks the packet for transmission and starts the transmit in
+ * the PCI version.
+ */
+static void hw_send_pkt(struct ksz_hw *hw)
+{
+ struct ksz_desc *cur = hw->tx_desc_info.cur;
+
+ cur->sw.buf.tx.last_seg = 1;
+
+ /* Interrupt only after specified number of descriptors used. */
+ if (hw->tx_int_cnt > hw->tx_int_mask) {
+ cur->sw.buf.tx.intr = 1;
+ hw->tx_int_cnt = 0;
+ hw->tx_size = 0;
+ }
+
+ /* KSZ8842 supports port directed transmission. */
+ cur->sw.buf.tx.dest_port = hw->dst_ports;
+
+ release_desc(cur);
+
+ writel(0, hw->io + KS_DMA_TX_START);
+}
+
+static int empty_addr(u8 *addr)
+{
+ u32 *addr1 = (u32 *) addr;
+ u16 *addr2 = (u16 *) &addr[4];
+
+ return 0 == *addr1 && 0 == *addr2;
+}
+
+/**
+ * hw_set_addr - set MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine programs the MAC address of the hardware when the address is
+ * overridden.
+ */
+static void hw_set_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
+ hw->io + KS884X_ADDR_0_OFFSET + i);
+
+ sw_set_addr(hw, hw->override_addr);
+}
+
+/**
+ * hw_read_addr - read MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine retrieves the MAC address of the hardware.
+ */
+static void hw_read_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
+ KS884X_ADDR_0_OFFSET + i);
+
+ if (!hw->mac_override) {
+ memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ if (empty_addr(hw->override_addr)) {
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ hw->override_addr[5] += hw->id;
+ hw_set_addr(hw);
+ }
+ }
+}
+
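+/*
+ * Program the MAC address into the additional address entry at @index and
+ * enable that entry.
+ */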
+static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
+{
+ int i;
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ mac_addr_hi = 0;
+ for (i = 0; i < 2; i++) {
+ mac_addr_hi <<= 8;
+ mac_addr_hi |= mac_addr[i];
+ }
+ mac_addr_hi |= ADD_ADDR_ENABLE;
+ mac_addr_lo = 0;
+ for (i = 2; i < 6; i++) {
+ mac_addr_lo <<= 8;
+ mac_addr_lo |= mac_addr[i];
+ }
+ index *= ADD_ADDR_INCR;
+
+ writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
+ writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
+}
+
+static void hw_set_add_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
+ if (empty_addr(hw->address[i]))
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ else
+ hw_ena_add_addr(hw, i, hw->address[i]);
+ }
+}
+
+static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+ int j = ADDITIONAL_ENTRIES;
+
+ if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ return 0;
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ return 0;
+ if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
+ j = i;
+ }
+ if (j < ADDITIONAL_ENTRIES) {
+ memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ hw_ena_add_addr(hw, j, hw->address[j]);
+ return 0;
+ }
+ return -1;
+}
+
+static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
+ memset(hw->address[i], 0, MAC_ADDR_LEN);
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/**
+ * hw_clr_multicast - clear multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine removes all multicast addresses set in the hardware.
+ */
+static void hw_clr_multicast(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++) {
+ hw->multi_bits[i] = 0;
+
+ writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
+ }
+}
+
+/**
+ * hw_set_grp_addr - set multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine programs the multicast hash table so that the hardware accepts
+ * packets sent to the configured multicast addresses.
+ */
+static void hw_set_grp_addr(struct ksz_hw *hw)
+{
+ int i;
+ int index;
+ int position;
+ int value;
+
+ memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
+
+ for (i = 0; i < hw->multi_list_size; i++) {
+ position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ hw->multi_bits[index] |= (u8) value;
+ }
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++)
+ writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
+ i);
+}
+
+/**
+ * hw_set_multicast - enable or disable all multicast receiving
+ * @hw: The hardware instance.
+ * @multicast: To turn on or off the all multicast feature.
+ *
+ * This routine enables/disables the hardware to accept all multicast packets.
+ */
+static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (multicast)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ else
+ hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * hw_set_promiscuous - enable or disable promiscuous receiving
+ * @hw: The hardware instance.
+ * @prom: To turn on or off the promiscuous feature.
+ *
+ * This routine enables/disables the hardware to accept all packets.
+ */
+static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (prom)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+ else
+ hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * sw_enable - enable the switch
+ * @hw: The hardware instance.
+ * @enable: The flag to enable or disable the switch.
+ *
+ * This routine is used to enable/disable the switch in KSZ8842.
+ */
+static void sw_enable(struct ksz_hw *hw, int enable)
+{
+ int port;
+
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (hw->dev_count > 1) {
+ /* Set port-base vlan membership with host port. */
+ sw_cfg_port_base_vlan(hw, port,
+ HOST_MASK | (1 << port));
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ } else {
+ sw_cfg_port_base_vlan(hw, port, PORT_MASK);
+ port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ }
+ }
+ if (hw->dev_count > 1)
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ else
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
+
+ if (enable)
+ enable = KS8842_START;
+ writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
+}
+
+/**
+ * sw_setup - setup the switch
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware switch engine for default operation.
+ */
+static void sw_setup(struct ksz_hw *hw)
+{
+ int port;
+
+ sw_set_global_ctrl(hw);
+
+ /* Enable switch broadcast storm protection at a 10 percent rate. */
+ sw_init_broad_storm(hw);
+ hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
+ for (port = 0; port < SWITCH_PORT_NUM; port++)
+ sw_ena_broad_storm(hw, port);
+
+ sw_init_prio(hw);
+
+ sw_init_mirror(hw);
+
+ sw_init_prio_rate(hw);
+
+ sw_init_vlan(hw);
+
+ if (hw->features & STP_SUPPORT)
+ sw_init_stp(hw);
+ if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ sw_enable(hw, 1);
+}
+
+/**
+ * ksz_start_timer - start kernel timer
+ * @info: Kernel timer information.
+ * @time: The time tick.
+ *
+ * This routine starts the kernel timer after the specified time tick.
+ */
+static void ksz_start_timer(struct ksz_timer_info *info, int time)
+{
+ info->cnt = 0;
+ info->timer.expires = jiffies + time;
+ add_timer(&info->timer);
+
+ /* infinity */
+ info->max = -1;
+}
+
+/**
+ * ksz_stop_timer - stop kernel timer
+ * @info: Kernel timer information.
+ *
+ * This routine stops the kernel timer.
+ */
+static void ksz_stop_timer(struct ksz_timer_info *info)
+{
+ if (info->max) {
+ info->max = 0;
+ del_timer_sync(&info->timer);
+ }
+}
+
+static void ksz_init_timer(struct ksz_timer_info *info, int period,
+ void (*function)(unsigned long), void *data)
+{
+ info->max = 0;
+ info->period = period;
+ init_timer(&info->timer);
+ info->timer.function = function;
+ info->timer.data = (unsigned long) data;
+}
+
+static void ksz_update_timer(struct ksz_timer_info *info)
+{
+ ++info->cnt;
+ if (info->max > 0) {
+ if (info->cnt < info->max) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ } else
+ info->max = 0;
+ } else if (info->max < 0) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ }
+}
+
+/**
+ * ksz_alloc_soft_desc - allocate software descriptors
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This local function allocates software descriptors for manipulation in
+ * memory.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
+ if (!desc_info->ring)
+ return 1;
+ memset((void *) desc_info->ring, 0,
+ sizeof(struct ksz_desc) * desc_info->alloc);
+ hw_init_desc(desc_info, transmit);
+ return 0;
+}
+
+/**
+ * ksz_alloc_desc - allocate hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local function allocates hardware descriptors for receiving and
+ * transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+ int offset;
+
+ /* Allocate memory for RX & TX descriptors. */
+ adapter->desc_pool.alloc_size =
+ hw->rx_desc_info.size * hw->rx_desc_info.alloc +
+ hw->tx_desc_info.size * hw->tx_desc_info.alloc +
+ DESC_ALIGNMENT;
+
+ adapter->desc_pool.alloc_virt =
+ pci_alloc_consistent(
+ adapter->pdev, adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
+ if (adapter->desc_pool.alloc_virt == NULL) {
+ adapter->desc_pool.alloc_size = 0;
+ return 1;
+ }
+ memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
+
+ /* Align the descriptor pool to the required descriptor alignment. */
+ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
+ (DESC_ALIGNMENT -
+ ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
+ adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
+ adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
+
+ /* Allocate receive/transmit descriptors. */
+ hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ adapter->desc_pool.virt;
+ hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
+ offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
+ hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ (adapter->desc_pool.virt + offset);
+ hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
+
+ if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
+ return 1;
+ if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * free_dma_buf - release DMA buffer resources
+ * @adapter: Adapter information structure.
+ * @dma_buf: The DMA buffer being freed.
+ * @direction: The PCI DMA direction of the buffer mapping.
+ *
+ * This routine is just a helper function to release the DMA buffer resources.
+ */
+static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
+ int direction)
+{
+ pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+ dev_kfree_skb(dma_buf->skb);
+ dma_buf->skb = NULL;
+ dma_buf->dma = 0;
+}
+
+/**
+ * ksz_init_rx_buffers - initialize receive descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This routine initializes DMA buffers for receiving.
+ */
+static void ksz_init_rx_buffers(struct dev_info *adapter)
+{
+ int i;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_hw *hw = &adapter->hw;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+
+ for (i = 0; i < hw->rx_desc_info.alloc; i++) {
+ get_rx_pkt(info, &desc);
+
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb && dma_buf->len != adapter->mtu)
+ free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+ dma_buf->len = adapter->mtu;
+ if (!dma_buf->skb)
+ dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
+ if (dma_buf->skb && !dma_buf->dma) {
+ dma_buf->skb->dev = adapter->dev;
+ dma_buf->dma = pci_map_single(
+ adapter->pdev,
+ skb_tail_pointer(dma_buf->skb),
+ dma_buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ /* Set descriptor. */
+ set_rx_buf(desc, dma_buf->dma);
+ set_rx_len(desc, dma_buf->len);
+ release_desc(desc);
+ }
+}
+
+/**
+ * ksz_alloc_mem - allocate memory for hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This function allocates memory for use by hardware descriptors for receiving
+ * and transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_mem(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Determine the number of receive and transmit descriptors. */
+ hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
+ hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
+
+ /* Determine after how many descriptors a transmit interrupt is requested. */
+ hw->tx_int_cnt = 0;
+ hw->tx_int_mask = NUM_OF_TX_DESC / 4;
+ if (hw->tx_int_mask > 8)
+ hw->tx_int_mask = 8;
+ while (hw->tx_int_mask) {
+ hw->tx_int_cnt++;
+ hw->tx_int_mask >>= 1;
+ }
+ if (hw->tx_int_cnt) {
+ hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
+ hw->tx_int_cnt = 0;
+ }
+
+ /* Determine the descriptor size. */
+ hw->rx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ hw->tx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
+ printk(KERN_ALERT
+ "Hardware descriptor size not right!\n");
+ ksz_check_desc_num(&hw->rx_desc_info);
+ ksz_check_desc_num(&hw->tx_desc_info);
+
+ /* Allocate descriptors. */
+ if (ksz_alloc_desc(adapter))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ksz_free_desc - free software and hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees the software and hardware descriptors allocated by
+ * ksz_alloc_desc().
+ */
+static void ksz_free_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Reset descriptor. */
+ hw->rx_desc_info.ring_virt = NULL;
+ hw->tx_desc_info.ring_virt = NULL;
+ hw->rx_desc_info.ring_phys = 0;
+ hw->tx_desc_info.ring_phys = 0;
+
+ /* Free memory. */
+ if (adapter->desc_pool.alloc_virt)
+ pci_free_consistent(
+ adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ adapter->desc_pool.alloc_virt,
+ adapter->desc_pool.dma_addr);
+
+ /* Reset resource pool. */
+ adapter->desc_pool.alloc_size = 0;
+ adapter->desc_pool.alloc_virt = NULL;
+
+ kfree(hw->rx_desc_info.ring);
+ hw->rx_desc_info.ring = NULL;
+ kfree(hw->tx_desc_info.ring);
+ hw->tx_desc_info.ring = NULL;
+}
+
+/**
+ * ksz_free_buffers - free buffers used in the descriptors
+ * @adapter: Adapter information structure.
+ * @desc_info: Descriptor information structure.
+ * @direction: The PCI DMA direction of the buffer mappings.
+ *
+ * This local routine frees the socket buffers held by the DMA buffers.
+ */
+static void ksz_free_buffers(struct dev_info *adapter,
+ struct ksz_desc_info *desc_info, int direction)
+{
+ int i;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_desc *desc = desc_info->ring;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb)
+ free_dma_buf(adapter, dma_buf, direction);
+ desc++;
+ }
+}
+
+/**
+ * ksz_free_mem - free all resources used by descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees all the resources allocated by ksz_alloc_mem().
+ */
+static void ksz_free_mem(struct dev_info *adapter)
+{
+ /* Free transmit buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
+ PCI_DMA_TODEVICE);
+
+ /* Free receive buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
+ PCI_DMA_FROMDEVICE);
+
+ /* Free descriptors. */
+ ksz_free_desc(adapter);
+}
+
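+/*
+ * Sum the saved MIB counters of @cnt ports starting at @first into @counter.
+ */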
+static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
+ u64 *counter)
+{
+ int i;
+ int mib;
+ int port;
+ struct ksz_port_mib *port_mib;
+
+ memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ for (i = 0, port = first; i < cnt; i++, port++) {
+ port_mib = &hw->port_mib[port];
+ for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
+ counter[mib] += port_mib->counter[mib];
+ }
+}
+
+/**
+ * send_packet - send packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This routine is used to send a packet out to the network.
+ */
+static void send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ksz_desc *desc;
+ struct ksz_desc *first;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_dma_buf *dma_buf;
+ int len;
+ int last_frag = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * KSZ8842 with multiple device interfaces needs to be told which port
+ * to send to.
+ */
+ if (hw->dev_count > 1)
+ hw->dst_ports = 1 << priv->port.first_port;
+
+ /* Hardware will pad the length to 60. */
+ len = skb->len;
+
+ /* Remember the very first descriptor. */
+ first = info->cur;
+ desc = first;
+
+ dma_buf = DMA_BUFFER(desc);
+ if (last_frag) {
+ int frag;
+ skb_frag_t *this_frag;
+
+ dma_buf->len = skb->len - skb->data_len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag = 0;
+ do {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+
+ /* Get a new descriptor. */
+ get_tx_pkt(info, &desc);
+
+ /* Keep track of descriptors used so far. */
+ ++hw->tx_int_cnt;
+
+ dma_buf = DMA_BUFFER(desc);
+ dma_buf->len = this_frag->size;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev,
+ page_address(this_frag->page) +
+ this_frag->page_offset,
+ dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag++;
+ if (frag == last_frag)
+ break;
+
+ /* Do not release the last descriptor here. */
+ release_desc(desc);
+ } while (1);
+
+ /* current points to the last descriptor. */
+ info->cur = desc;
+
+ /* Release the first descriptor. */
+ release_desc(first);
+ } else {
+ dma_buf->len = len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ (desc)->sw.buf.tx.csum_gen_tcp = 1;
+ (desc)->sw.buf.tx.csum_gen_udp = 1;
+ }
+
+ /*
+ * The last descriptor holds the packet so that it can be returned to the
+ * network subsystem after all descriptors are transmitted.
+ */
+ dma_buf->skb = skb;
+
+ hw_send_pkt(hw);
+
+ /* Update transmit statistics. */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+}
+
+/**
+ * transmit_cleanup - clean up transmit descriptors
+ * @hw_priv: Network device information.
+ * @normal: Stop at descriptors still owned by the hardware instead of
+ * reclaiming them.
+ *
+ * This routine is called to clean up the transmitted buffers.
+ */
+static void transmit_cleanup(struct dev_info *hw_priv, int normal)
+{
+ int last;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct net_device *dev = NULL;
+
+ spin_lock(&hw_priv->hwlock);
+ last = info->last;
+
+ while (info->avail < info->alloc) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[last];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.tx.hw_owned) {
+ if (normal)
+ break;
+ else
+ reset_desc(desc, status);
+ }
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_unmap_single(
+ hw_priv->pdev, dma_buf->dma, dma_buf->len,
+ PCI_DMA_TODEVICE);
+
+ /* This descriptor contains the last buffer in the packet. */
+ if (dma_buf->skb) {
+ dev = dma_buf->skb->dev;
+
+ /* Release the packet back to network subsystem. */
+ dev_kfree_skb_irq(dma_buf->skb);
+ dma_buf->skb = NULL;
+ }
+
+ /* Free the transmitted descriptor. */
+ last++;
+ last &= info->mask;
+ info->avail++;
+ }
+ info->last = last;
+ spin_unlock(&hw_priv->hwlock);
+
+ /* Update the transmit time stamp so the watchdog timer does not expire. */
+ if (dev)
+ dev->trans_start = jiffies;
+}
+
+/**
+ * tx_done - transmit done processing
+ * @hw_priv: Network device information.
+ *
+ * This routine is called when the transmit interrupt is triggered, indicating
+ * either a packet is sent successfully or there are transmit errors.
+ */
+static void tx_done(struct dev_info *hw_priv)
+{
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ transmit_cleanup(hw_priv, 1);
+
+ for (port = 0; port < hw->dev_count; port++) {
+ struct net_device *dev = hw->port_info[port].pdev;
+
+ if (netif_running(dev) && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
+{
+ skb->dev = old->dev;
+ skb->protocol = old->protocol;
+ skb->ip_summed = old->ip_summed;
+ skb->csum = old->csum;
+ skb_set_network_header(skb, ETH_HLEN);
+
+ dev_kfree_skb(old);
+}
+
+/**
+ * netdev_tx - send out packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This function is used by the upper network layer to send out a packet.
+ *
+ * Return 0 if successful; NETDEV_TX_BUSY if the packet cannot be queued.
+ */
+static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int left;
+ int num = 1;
+ int rc = 0;
+
+ if (hw->features & SMALL_PACKET_TX_BUG) {
+ struct sk_buff *org_skb = skb;
+
+ if (skb->len <= 48) {
+ if (skb_end_pointer(skb) - skb->data >= 50) {
+ memset(&skb->data[skb->len], 0, 50 - skb->len);
+ skb->len = 50;
+ } else {
+ skb = dev_alloc_skb(50);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ memcpy(skb->data, org_skb->data, org_skb->len);
+ memset(&skb->data[org_skb->len], 0,
+ 50 - org_skb->len);
+ skb->len = 50;
+ copy_old_skb(org_skb, skb);
+ }
+ }
+ }
+
+ spin_lock_irq(&hw_priv->hwlock);
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ left = hw_alloc_pkt(hw, skb->len, num);
+ if (left) {
+ if (left < num ||
+ ((hw->features & IPV6_CSUM_GEN_HACK) &&
+ (CHECKSUM_PARTIAL == skb->ip_summed) &&
+ (ETH_P_IPV6 == htons(skb->protocol)))) {
+ struct sk_buff *org_skb = skb;
+
+ skb = dev_alloc_skb(org_skb->len);
+ if (!skb) {
+ rc = NETDEV_TX_BUSY;
+ goto unlock;
+ }
+ skb_copy_and_csum_dev(org_skb, skb->data);
+ org_skb->ip_summed = 0;
+ skb->len = org_skb->len;
+ copy_old_skb(org_skb, skb);
+ }
+ send_packet(skb, dev);
+ if (left <= num)
+ netif_stop_queue(dev);
+ } else {
+ /* Stop the transmit queue until descriptors become available. */
+ netif_stop_queue(dev);
+ rc = NETDEV_TX_BUSY;
+ }
+unlock:
+ spin_unlock_irq(&hw_priv->hwlock);
+
+ return rc;
+}
+
+/**
+ * netdev_tx_timeout - transmit timeout processing
+ * @dev: Network device.
+ *
+ * This routine is called when the transmit timer expires. This indicates that
+ * the hardware is not running correctly because transmit interrupts are not
+ * triggered to free up resources so that the transmit routine can continue
+ * sending out packets. The hardware is reset to correct the problem.
+ */
+static void netdev_tx_timeout(struct net_device *dev)
+{
+ static unsigned long last_reset;
+
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ if (hw->dev_count > 1) {
+ /*
+ * Only reset the hardware if time between calls is long
+ * enough.
+ */
+ if (jiffies - last_reset <= dev->watchdog_timeo)
+ hw_priv = NULL;
+ }
+
+ last_reset = jiffies;
+ if (hw_priv) {
+ hw_dis_intr(hw);
+ hw_disable(hw);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+ ksz_init_rx_buffers(hw_priv);
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys,
+ hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ if (hw->all_multi)
+ hw_set_multicast(hw, hw->all_multi);
+ else if (hw->multi_list_size)
+ hw_set_grp_addr(hw);
+
+ if (hw->dev_count > 1) {
+ hw_set_add_addr(hw);
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ struct net_device *port_dev;
+
+ port_set_stp_state(hw, port,
+ STP_STATE_DISABLED);
+
+ port_dev = hw->port_info[port].pdev;
+ if (netif_running(port_dev))
+ port_set_stp_state(hw, port,
+ STP_STATE_SIMPLE);
+ }
+ }
+
+ hw_enable(hw);
+ hw_ena_intr(hw);
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
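+/*
+ * Mark received IPv4 TCP packets as having their checksum verified by the
+ * hardware, skipping over a VLAN tag if present.
+ */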
+static inline void csum_verified(struct sk_buff *skb)
+{
+ unsigned short protocol;
+ struct iphdr *iph;
+
+ protocol = skb->protocol;
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *) skb_network_header(skb);
+ if (protocol == htons(ETH_P_8021Q)) {
+ protocol = iph->tot_len;
+ skb_set_network_header(skb, VLAN_HLEN);
+ iph = (struct iphdr *) skb_network_header(skb);
+ }
+ if (protocol == htons(ETH_P_IP)) {
+ if (iph->protocol == IPPROTO_TCP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
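+/*
+ * Copy a received frame from its DMA buffer into a newly allocated socket
+ * buffer and pass it up to the network stack.
+ */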
+static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
+ struct ksz_desc *desc, union desc_stat status)
+{
+ int packet_len;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_dma_buf *dma_buf;
+ struct sk_buff *skb;
+ int rx_status;
+
+ /* Received length includes 4-byte CRC. */
+ packet_len = status.rx.frame_len - 4;
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_dma_sync_single_for_cpu(
+ hw_priv->pdev, dma_buf->dma, packet_len + 4,
+ PCI_DMA_FROMDEVICE);
+
+ do {
+ /* skb->data != skb->head */
+ skb = dev_alloc_skb(packet_len + 2);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ /*
+ * Align socket buffer on a 4-byte boundary for better
+ * performance.
+ */
+ skb_reserve(skb, 2);
+
+ memcpy(skb_put(skb, packet_len),
+ dma_buf->skb->data, packet_len);
+ } while (0);
+
+ skb->dev = dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
+ csum_verified(skb);
+
+ /* Update receive statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += packet_len;
+
+ /* Notify upper layer for received packet. */
+ dev->last_rx = jiffies;
+
+ rx_status = netif_rx(skb);
+
+ return 0;
+}
+
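+/* Process received packets and deliver all of them to the first device. */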
+static int dev_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
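+/*
+ * Process received packets and deliver each to the network device of the
+ * port it arrived on.
+ */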
+static int port_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
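+/*
+ * Same as port_rcv_packets(), but also accept frames whose only error is
+ * being too long so that oversized packets can still be delivered.
+ */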
+static int dev_rcv_special(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ /*
+ * Receive without error. With receive errors
+ * disabled, packets with receive errors will be
+ * dropped, so no need to check the error bit.
+ */
+ if (!status.rx.error || (status.data &
+ KS_DESC_RX_ERROR_COND) ==
+ KS_DESC_RX_ERROR_TOO_LONG) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ } else {
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* Update receive error statistics. */
+ priv->port.counter[OID_COUNTER_RCV_ERROR]++;
+ }
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static void rx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (!hw->enabled)
+ return;
+ if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
+
+ /* In case receive process is suspended because of overrun. */
+ hw_resume_rx(hw);
+
+ /* Tasklets run with interrupts enabled, so use the IRQ-safe lock. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
+ spin_unlock_irq(&hw_priv->hwlock);
+ } else {
+ hw_ack_intr(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+}
+
+static void tx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_ack_intr(hw, KS884X_INT_TX_MASK);
+
+ tx_done(hw_priv);
+
+ /* Tasklets run with interrupts enabled, so use the IRQ-safe lock. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_TX);
+ spin_unlock_irq(&hw_priv->hwlock);
+}
+
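+/*
+ * hw->rx_stop tracks receive stop interrupts: 0 means the stop was not
+ * expected (mask the interrupt), 1 means receive was just started, and larger
+ * values restart the receive DMA if it is still enabled.
+ */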
+static inline void handle_rx_stop(struct ksz_hw *hw)
+{
+ /* Receive has just been stopped. */
+ if (0 == hw->rx_stop)
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ else if (hw->rx_stop > 1) {
+ if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
+ hw_start_rx(hw);
+ } else {
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ hw->rx_stop = 0;
+ }
+ } else
+ /* Receive has just been started. */
+ hw->rx_stop++;
+}
+
+/**
+ * netdev_intr - interrupt handling
+ * @irq: Interrupt number.
+ * @dev_id: Network device.
+ *
+ * This function handles the interrupt raised by the network controller.
+ *
+ * Return IRQ_HANDLED if interrupt is handled.
+ */
+static irqreturn_t netdev_intr(int irq, void *dev_id)
+{
+ uint int_enable = 0;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_read_intr(hw, &int_enable);
+
+ /* Not our interrupt! */
+ if (!int_enable)
+ return IRQ_NONE;
+
+ do {
+ hw_ack_intr(hw, int_enable);
+ int_enable &= hw->intr_mask;
+
+ if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
+ hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
+ tasklet_schedule(&hw_priv->tx_tasklet);
+ }
+
+ if (likely(int_enable & KS884X_INT_RX)) {
+ hw_dis_intr_bit(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
+ priv->stats.rx_fifo_errors++;
+ hw_resume_rx(hw);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_PHY)) {
+ struct ksz_port *port = &priv->port;
+
+ hw->features |= LINK_INT_WORKING;
+ port_get_link_speed(port);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
+ handle_rx_stop(hw);
+ break;
+ }
+
+ if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
+ u32 data;
+
+ hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
+ printk(KERN_INFO "Tx stopped\n");
+ data = readl(hw->io + KS_DMA_TX_CTRL);
+ if (!(data & DMA_TX_ENABLE))
+ printk(KERN_INFO "Tx disabled\n");
+ break;
+ }
+ } while (0);
+
+ hw_ena_intr(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Linux network device functions
+ */
+
+static unsigned long next_jiffies;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netdev_netpoll(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ hw_dis_intr(&hw_priv->hw);
+ netdev_intr(dev->irq, dev);
+}
+#endif
+
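+/*
+ * Recompute port-based VLAN membership after a port joins or leaves the
+ * bridge; with no port left in forwarding state the ports are put back into
+ * the simple STP state and the switch address table is blocked.
+ */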
+static void bridge_change(struct ksz_hw *hw)
+{
+ int port;
+ u8 member;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* No ports in forwarding state. */
+ if (!sw->member) {
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ sw_block_addr(hw);
+ }
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
+ member = HOST_MASK | sw->member;
+ else
+ member = HOST_MASK | (1 << port);
+ if (member != sw->port_cfg[port].member)
+ sw_cfg_port_base_vlan(hw, port, member);
+ }
+}
+
+/**
+ * netdev_close - close network device
+ * @dev: Network device.
+ *
+ * This function processes the close operation of the network device. This is
+ * caused by the user command "ifconfig ethX down."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int pi;
+
+ netif_stop_queue(dev);
+
+ ksz_stop_timer(&priv->monitor_timer_info);
+
+ /* Need to shut down the port manually in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
+
+ /* Port is closed. Need to change bridge setting. */
+ if (hw->features & STP_SUPPORT) {
+ pi = 1 << port->first_port;
+ if (hw->ksz_switch->member & pi) {
+ hw->ksz_switch->member &= ~pi;
+ bridge_change(hw);
+ }
+ }
+ }
+ if (port->first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ if (!hw_priv->wol_enable)
+ port_set_power_saving(port, true);
+
+ if (priv->multicast)
+ --hw->all_multi;
+ if (priv->promiscuous)
+ --hw->promiscuous;
+
+ hw_priv->opened--;
+ if (!(hw_priv->opened)) {
+ ksz_stop_timer(&hw_priv->mib_timer_info);
+ flush_work(&hw_priv->mib_read);
+
+ hw_dis_intr(hw);
+ hw_disable(hw);
+ hw_clr_multicast(hw);
+
+ /* Delay for receive task to stop scheduling itself. */
+ msleep(2000 / HZ);
+
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+ free_irq(dev->irq, hw_priv->dev);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+
+ /* Clean out the static MAC table when the switch is shut down. */
+ if (hw->features & STP_SUPPORT)
+ sw_clr_sta_mac_table(hw);
+ }
+
+ return 0;
+}
+
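+/*
+ * Configure huge frame support and pick the matching receive handler:
+ * dev_rcv_special() when over-length frames must be accepted, otherwise the
+ * single- or multi-port handler depending on the device count.
+ */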
+static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
+{
+ if (hw->ksz_switch) {
+ u32 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ if (hw->features & RX_HUGE_FRAME)
+ data |= SWITCH_HUGE_PACKET;
+ else
+ data &= ~SWITCH_HUGE_PACKET;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ }
+ if (hw->features & RX_HUGE_FRAME) {
+ hw->rx_cfg |= DMA_RX_ERROR;
+ hw_priv->dev_rcv = dev_rcv_special;
+ } else {
+ hw->rx_cfg &= ~DMA_RX_ERROR;
+ if (hw->dev_count > 1)
+ hw_priv->dev_rcv = port_rcv_packets;
+ else
+ hw_priv->dev_rcv = dev_rcv_packets;
+ }
+}
+
+static int prepare_hardware(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int rc = 0;
+
+ /* Remember the network device that requests interrupts. */
+ hw_priv->dev = dev;
+ rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return rc;
+ tasklet_enable(&hw_priv->rx_tasklet);
+ tasklet_enable(&hw_priv->tx_tasklet);
+
+ hw->promiscuous = 0;
+ hw->all_multi = 0;
+ hw->multi_list_size = 0;
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ hw_cfg_huge_frame(hw_priv, hw);
+ ksz_init_rx_buffers(hw_priv);
+ return 0;
+}
+
+/**
+ * netdev_open - open network device
+ * @dev: Network device.
+ *
+ * This function processes the open operation of the network device. This is
+ * caused by the user command "ifconfig ethX up."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int i;
+ int p;
+ int rc = 0;
+
+ priv->multicast = 0;
+ priv->promiscuous = 0;
+
+ /* Reset device statistics. */
+ memset(&priv->stats, 0, sizeof(struct net_device_stats));
+ memset((void *) port->counter, 0,
+ (sizeof(u64) * OID_COUNTER_LAST));
+
+ if (!(hw_priv->opened)) {
+ rc = prepare_hardware(dev);
+ if (rc)
+ return rc;
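+ /*
+ * Stagger the initial MIB read times so the ports are polled
+ * roughly one second apart.
+ */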
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ if (next_jiffies < jiffies)
+ next_jiffies = jiffies + HZ * 2;
+ else
+ next_jiffies += HZ * 1;
+ hw_priv->counter[i].time = next_jiffies;
+ hw->port_mib[i].state = media_disconnected;
+ port_init_cnt(hw, i);
+ }
+ if (hw->ksz_switch)
+ hw->port_mib[HOST_PORT].state = media_connected;
+ else {
+ hw_add_wol_bcast(hw);
+ hw_cfg_wol_pme(hw, 0);
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ }
+ }
+ port_set_power_saving(port, false);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ /*
+ * Initialize to invalid value so that link detection
+ * is done.
+ */
+ hw->port_info[p].partner = 0xFF;
+ hw->port_info[p].state = media_disconnected;
+ }
+
+ /* Need to open the port in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
+ if (port->first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ }
+
+ port_get_link_speed(port);
+ if (port->force_link)
+ port_force_link_speed(port);
+ else
+ port_set_link_speed(port);
+
+ if (!(hw_priv->opened)) {
+ hw_setup_intr(hw);
+ hw_enable(hw);
+ hw_ena_intr(hw);
+
+ if (hw->mib_port_cnt)
+ ksz_start_timer(&hw_priv->mib_timer_info,
+ hw_priv->mib_timer_info.period);
+ }
+
+ hw_priv->opened++;
+
+ ksz_start_timer(&priv->monitor_timer_info,
+ priv->monitor_timer_info.period);
+
+ priv->media_state = port->linked->state;
+
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* RX errors = rx_errors */
+/* RX dropped = rx_dropped */
+/* RX overruns = rx_fifo_errors */
+/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
+/* TX errors = tx_errors */
+/* TX dropped = tx_dropped */
+/* TX overruns = tx_fifo_errors */
+/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
+/* collisions = collisions */
+
+/**
+ * netdev_query_statistics - query network device statistics
+ * @dev: Network device.
+ *
+ * This function returns the statistics of the network device. The device
+ * need not be opened.
+ *
+ * Return network device statistics.
+ */
+static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &priv->adapter->hw;
+ struct ksz_port_mib *mib;
+ int i;
+ int p;
+
+ priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+ priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+
+ /* Reset to zero to add count later. */
+ priv->stats.multicast = 0;
+ priv->stats.collisions = 0;
+ priv->stats.rx_length_errors = 0;
+ priv->stats.rx_crc_errors = 0;
+ priv->stats.rx_frame_errors = 0;
+ priv->stats.tx_window_errors = 0;
+
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ mib = &hw->port_mib[p];
+
+ priv->stats.multicast += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_MULTICAST];
+
+ priv->stats.collisions += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
+
+ priv->stats.rx_length_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
+ mib->counter[MIB_COUNTER_RX_FRAGMENT] +
+ mib->counter[MIB_COUNTER_RX_OVERSIZE] +
+ mib->counter[MIB_COUNTER_RX_JABBER]);
+ priv->stats.rx_crc_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_CRC_ERR];
+ priv->stats.rx_frame_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
+ mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
+
+ priv->stats.tx_window_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * netdev_set_mac_address - set network device MAC address
+ * @dev: Network device.
+ * @addr: Buffer of MAC address.
+ *
+ * This function is used to set the MAC address of the network device.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct sockaddr *mac = addr;
+ uint interrupt;
+
+ if (priv->port.first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ else {
+ hw->mac_override = 1;
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+ memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
+
+ interrupt = hw_block_intr(hw);
+
+ if (priv->port.first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ else
+ hw_set_addr(hw);
+ hw_restore_intr(hw, interrupt);
+
+ return 0;
+}
+
+static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_hw *hw, int promiscuous)
+{
+ if (promiscuous != priv->promiscuous) {
+ u8 prev_state = hw->promiscuous;
+
+ if (promiscuous)
+ ++hw->promiscuous;
+ else
+ --hw->promiscuous;
+ priv->promiscuous = promiscuous;
+
+ /* Program hardware promiscuous mode only when the count crosses 0 and 1. */
+ if (hw->promiscuous <= 1 && prev_state <= 1)
+ hw_set_promiscuous(hw, hw->promiscuous);
+
+ /*
+ * Port is not in promiscuous mode, meaning it is released
+ * from the bridge.
+ */
+ if ((hw->features & STP_SUPPORT) && !promiscuous &&
+ dev->br_port) {
+ struct ksz_switch *sw = hw->ksz_switch;
+ int port = priv->port.first_port;
+
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ port = 1 << port;
+ if (sw->member & port) {
+ sw->member &= ~port;
+ bridge_change(hw);
+ }
+ }
+ }
+}
+
+static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
+ int multicast)
+{
+ if (multicast != priv->multicast) {
+ u8 all_multi = hw->all_multi;
+
+ if (multicast)
+ ++hw->all_multi;
+ else
+ --hw->all_multi;
+ priv->multicast = multicast;
+
+ /* Program hardware all-multicast mode only when the count crosses 0 and 1. */
+ if (hw->all_multi <= 1 && all_multi <= 1)
+ hw_set_multicast(hw, hw->all_multi);
+ }
+}
+
+/**
+ * netdev_set_rx_mode - set receive mode
+ * @dev: Network device.
+ *
+ * This routine is used to set multicast addresses or put the network device
+ * into promiscuous mode.
+ */
+static void netdev_set_rx_mode(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct dev_mc_list *mc_ptr;
+ int multicast = (dev->flags & IFF_ALLMULTI);
+
+ dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
+
+ if (hw_priv->hw.dev_count > 1)
+ multicast |= (dev->flags & IFF_MULTICAST);
+ dev_set_multicast(priv, hw, multicast);
+
+ /* Cannot use different hashes in multiple device interfaces mode. */
+ if (hw_priv->hw.dev_count > 1)
+ return;
+
+ if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
+ int i = 0;
+
+ /* List is too big to support, so turn on all-multicast mode. */
+ if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (MAX_MULTICAST_LIST != hw->multi_list_size) {
+ hw->multi_list_size = MAX_MULTICAST_LIST;
+ ++hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ return;
+ }
+
+ netdev_for_each_mc_addr(mc_ptr, dev) {
+ if (!(*mc_ptr->dmi_addr & 1))
+ continue;
+ if (i >= MAX_MULTICAST_LIST)
+ break;
+ memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
+ MAC_ADDR_LEN);
+ }
+ hw->multi_list_size = (u8) i;
+ hw_set_grp_addr(hw);
+ } else {
+ if (MAX_MULTICAST_LIST == hw->multi_list_size) {
+ --hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ hw->multi_list_size = 0;
+ hw_clr_multicast(hw);
+ }
+}
+
+static int netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int hw_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Cannot use different MTU in multiple device interfaces mode. */
+ if (hw->dev_count > 1)
+ if (dev != hw_priv->dev)
+ return 0;
+ if (new_mtu < 60)
+ return -EINVAL;
+
+ if (dev->mtu != new_mtu) {
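+ /*
+ * Add room for the Ethernet header plus 4 extra bytes (presumably
+ * the VLAN tag or FCS), then round the buffer size up to a 4-byte
+ * multiple below.
+ */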
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > MAX_RX_BUF_SIZE)
+ return -EINVAL;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
+ }
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+ }
+ return 0;
+}
+
+/**
+ * netdev_ioctl - I/O control processing
+ * @dev: Network device.
+ * @ifr: Interface request structure.
+ * @cmd: I/O control code.
+ *
+ * This function is used to process I/O control calls.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int rc;
+ int result = 0;
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (down_interruptible(&priv->proc_sem))
+ return -ERESTARTSYS;
+
+ /* assume success */
+ rc = 0;
+ switch (cmd) {
+ /* Get address of MII PHY in use. */
+ case SIOCGMIIPHY:
+ data->phy_id = priv->id;
+
+ /* Fallthrough... */
+
+ /* Read MII PHY register. */
+ case SIOCGMIIREG:
+ if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_r_phy(hw, port->linked->port_id, data->reg_num,
+ &data->val_out);
+ break;
+
+ /* Write MII PHY register. */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ result = -EPERM;
+ else if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_w_phy(hw, port->linked->port_id, data->reg_num,
+ data->val_in);
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ }
+
+ up(&priv->proc_sem);
+
+ return result;
+}
+
+/*
+ * MII support
+ */
+
+/**
+ * mdio_read - read PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ *
+ * This function returns the PHY register value.
+ *
+ * Return the register value.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ u16 val_out;
+
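+ /*
+ * The register number is doubled here, presumably because hw_r_phy()
+ * expects a byte offset into 16-bit wide PHY registers.
+ */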
+ hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
+ return val_out;
+}
+
+/**
+ * mdio_write - set PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ * @val: The register value.
+ *
+ * This procedure sets the PHY register value.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int pi;
+
+ for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
+ hw_w_phy(hw, pi, reg_num << 1, val);
+}
+
+/*
+ * ethtool support
+ */
+
+#define EEPROM_SIZE 0x40
+
+static u16 eeprom_data[EEPROM_SIZE] = { 0 };
+
+#define ADVERTISED_ALL \
+ (ADVERTISED_10baseT_Half | \
+ ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | \
+ ADVERTISED_100baseT_Full)
+
+/* These functions use the MII functions in mii.c. */
+
+/**
+ * netdev_get_settings - get network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function queries the PHY and returns its state in the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ mutex_lock(&hw_priv->lock);
+ mii_ethtool_gset(&priv->mii_if, cmd);
+ cmd->advertising |= SUPPORTED_TP;
+ mutex_unlock(&hw_priv->lock);
+
+ /* Save advertised settings for workaround in next function. */
+ priv->advertising = cmd->advertising;
+ return 0;
+}
+
+/**
+ * netdev_set_settings - set network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function sets the PHY according to the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ int rc;
+
+ /*
+ * The ethtool utility does not change the advertised settings if
+ * auto-negotiation is not specified explicitly.
+ */
+ if (cmd->autoneg && priv->advertising == cmd->advertising) {
+ cmd->advertising |= ADVERTISED_ALL;
+ if (10 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half);
+ else if (100 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half);
+ if (0 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else if (1 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ mutex_lock(&hw_priv->lock);
+ if (cmd->autoneg &&
+ (cmd->advertising & ADVERTISED_ALL) ==
+ ADVERTISED_ALL) {
+ port->duplex = 0;
+ port->speed = 0;
+ port->force_link = 0;
+ } else {
+ port->duplex = cmd->duplex + 1;
+ if (cmd->speed != 1000)
+ port->speed = cmd->speed;
+ if (cmd->autoneg)
+ port->force_link = 0;
+ else
+ port->force_link = 1;
+ }
+ rc = mii_ethtool_sset(&priv->mii_if, cmd);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_nway_reset - restart auto-negotiation
+ * @dev: Network device.
+ *
+ * This function restarts the PHY for auto-negotiation.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ int rc;
+
+ mutex_lock(&hw_priv->lock);
+ rc = mii_nway_restart(&priv->mii_if);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_get_link - get network device link status
+ * @dev: Network device.
+ *
+ * This function gets the link status from the PHY.
+ *
+ * Return true if PHY is linked and false otherwise.
+ */
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int rc;
+
+ rc = mii_link_ok(&priv->mii_if);
+ return rc;
+}
+
+/**
+ * netdev_get_drvinfo - get network driver information
+ * @dev: Network device.
+ * @info: Ethtool driver info data structure.
+ *
+ * This procedure returns the driver information.
+ */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(hw_priv->pdev));
+}
+
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
+static struct hw_regs {
+ int start;
+ int end;
+} hw_regs_range[] = {
+ { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
+ { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
+ { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
+ { KS884X_SIDER_P, KS8842_SGCR7_P },
+ { KS8842_MACAR1_P, KS8842_TOSR8_P },
+ { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
+ { 0, 0 }
+};
+
+static int netdev_get_regs_len(struct net_device *dev)
+{
+ struct hw_regs *range = hw_regs_range;
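+ /* Start with 0x40 bytes for the PCI config space dumped by netdev_get_regs(). */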
+ int regs_len = 0x10 * sizeof(u32);
+
+ while (range->end > range->start) {
+ regs_len += (range->end - range->start + 3) / 4 * 4;
+ range++;
+ }
+ return regs_len;
+}
+
+/**
+ * netdev_get_regs - get register dump
+ * @dev: Network device.
+ * @regs: Ethtool registers data structure.
+ * @ptr: Buffer to store the register values.
+ *
+ * This procedure dumps the register values in the provided buffer.
+ */
+static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int *buf = (int *) ptr;
+ struct hw_regs *range = hw_regs_range;
+ int len;
+
+ mutex_lock(&hw_priv->lock);
+ regs->version = 0;
+ for (len = 0; len < 0x40; len += 4) {
+ pci_read_config_dword(hw_priv->pdev, len, buf);
+ buf++;
+ }
+ while (range->end > range->start) {
+ for (len = range->start; len < range->end; len += 4) {
+ *buf = readl(hw->io + len);
+ buf++;
+ }
+ range++;
+ }
+ mutex_unlock(&hw_priv->lock);
+}
+
+#define WOL_SUPPORT \
+ (WAKE_PHY | WAKE_MAGIC | \
+ WAKE_UCAST | WAKE_MCAST | \
+ WAKE_BCAST | WAKE_ARP)
+
+/**
+ * netdev_get_wol - get Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This procedure returns Wake-on-LAN support.
+ */
+static void netdev_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ wol->supported = hw_priv->wol_support;
+ wol->wolopts = hw_priv->wol_enable;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/**
+ * netdev_set_wol - set Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This function sets Wake-on-LAN support.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ if (wol->wolopts & ~hw_priv->wol_support)
+ return -EINVAL;
+
+ hw_priv->wol_enable = wol->wolopts;
+
+ /* Link wakeup cannot really be disabled. */
+ if (wol->wolopts)
+ hw_priv->wol_enable |= WAKE_PHY;
+ hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
+ return 0;
+}
+
+/**
+ * netdev_get_msglevel - get debug message level
+ * @dev: Network device.
+ *
+ * This function returns current debug message level.
+ *
+ * Return current debug message flags.
+ */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+/**
+ * netdev_set_msglevel - set debug message level
+ * @dev: Network device.
+ * @value: Debug message flags.
+ *
+ * This procedure sets debug message level.
+ */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+/**
+ * netdev_get_eeprom_len - get EEPROM length
+ * @dev: Network device.
+ *
+ * This function returns the length of the EEPROM.
+ *
+ * Return length of the EEPROM.
+ */
+static int netdev_get_eeprom_len(struct net_device *dev)
+{
+ return EEPROM_SIZE * 2;
+}
+
+/**
+ * netdev_get_eeprom - get EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Buffer to store the EEPROM data.
+ *
+ * This function dumps the EEPROM data in the provided buffer.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+#define EEPROM_MAGIC 0x10A18842
+
+static int netdev_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u8 *eeprom_byte = (u8 *) eeprom_data;
+ int i;
+ int len;
+
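+ /* The EEPROM is accessed as 16-bit words; convert the byte range to word indices. */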
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ eeprom->magic = EEPROM_MAGIC;
+ memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+/**
+ * netdev_set_eeprom - write EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Data buffer.
+ *
+ * This function modifies the EEPROM data one byte at a time.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u16 eeprom_word[EEPROM_SIZE];
+ u8 *eeprom_byte = (u8 *) eeprom_word;
+ int i;
+ int len;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
+ memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
+ for (i = 0; i < EEPROM_SIZE; i++)
+ if (eeprom_word[i] != eeprom_data[i]) {
+ eeprom_data[i] = eeprom_word[i];
+ eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * netdev_get_pauseparam - get flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This procedure returns the PAUSE control flow settings.
+ */
+static void netdev_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
+ if (!hw->ksz_switch) {
+ pause->rx_pause =
+ (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
+ pause->tx_pause =
+ (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
+ } else {
+ pause->rx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
+ pause->tx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
+ }
+}
+
+/**
+ * netdev_set_pauseparam - set flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This function sets the PAUSE control flow settings.
+ * Not implemented yet.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ mutex_lock(&hw_priv->lock);
+ if (pause->autoneg) {
+ if (!pause->rx_pause && !pause->tx_pause)
+ port->flow_ctrl = PHY_NO_FLOW_CTRL;
+ else
+ port->flow_ctrl = PHY_FLOW_CTRL;
+ hw->overrides &= ~PAUSE_FLOW_CTRL;
+ port->force_link = 0;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, 1);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, 1);
+ }
+ port_set_link_speed(port);
+ } else {
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, pause->rx_pause);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, pause->tx_pause);
+ } else
+ set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+/**
+ * netdev_get_ringparam - get tx/rx ring parameters
+ * @dev: Network device.
+ * @ring: Ethtool RING settings data structure.
+ *
+ * This procedure returns the TX/RX ring settings.
+ */
+static void netdev_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ ring->tx_max_pending = (1 << 9);
+ ring->tx_pending = hw->tx_desc_info.alloc;
+ ring->rx_max_pending = (1 << 9);
+ ring->rx_pending = hw->rx_desc_info.alloc;
+}
+
+#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[STATS_LEN] = {
+ { "rx_lo_priority_octets" },
+ { "rx_hi_priority_octets" },
+ { "rx_undersize_packets" },
+ { "rx_fragments" },
+ { "rx_oversize_packets" },
+ { "rx_jabbers" },
+ { "rx_symbol_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "rx_mac_ctrl_packets" },
+ { "rx_pause_packets" },
+ { "rx_bcast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_ucast_packets" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+
+ { "tx_lo_priority_octets" },
+ { "tx_hi_priority_octets" },
+ { "tx_late_collisions" },
+ { "tx_pause_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_deferred" },
+ { "tx_total_collisions" },
+ { "tx_excessive_collisions" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+/**
+ * netdev_get_strings - get statistics identity strings
+ * @dev: Network device.
+ * @stringset: String set identifier.
+ * @buf: Buffer to store the strings.
+ *
+ * This procedure returns the strings used to identify the statistics.
+ */
+static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (ETH_SS_STATS == stringset)
+ memcpy(buf, &ethtool_stats_keys,
+ ETH_GSTRING_LEN * hw->mib_cnt);
+}
+
+/**
+ * netdev_get_sset_count - get statistics size
+ * @dev: Network device.
+ * @sset: The statistics set number.
+ *
+ * This function returns the size of the statistics to be reported.
+ *
+ * Return size of the statistics to be reported.
+ */
+static int netdev_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return hw->mib_cnt;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * netdev_get_ethtool_stats - get network device statistics
+ * @dev: Network device.
+ * @stats: Ethtool statistics data structure.
+ * @data: Buffer to store the statistics.
+ *
+ * This procedure returns the statistics.
+ */
+static void netdev_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int n_stats = stats->n_stats;
+ int i;
+ int n;
+ int p;
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ if (media_connected == hw->port_mib[p].state) {
+ hw_priv->counter[p].read = 1;
+
+ /* Remember first port that requests read. */
+ if (n == SWITCH_PORT_NUM)
+ n = p;
+ }
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ if (n < SWITCH_PORT_NUM)
+ schedule_work(&hw_priv->mib_read);
+
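+ /* Wait (with timeout) for the MIB work to mark the counters as read. */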
+ if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
+ p = n;
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ } else
+ for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
+ if (0 == i) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 2);
+ } else if (hw->port_mib[p].cnt_ptr) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ }
+ }
+
+ get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
+ n = hw->mib_cnt;
+ if (n > n_stats)
+ n = n_stats;
+ n_stats -= n;
+ for (i = 0; i < n; i++)
+ *data++ = counter[i];
+}
+
+/**
+ * netdev_get_rx_csum - get receive checksum support
+ * @dev: Network device.
+ *
+ * This function gets receive checksum support setting.
+ *
+ * Return true if receive checksum is enabled; false otherwise.
+ */
+static u32 netdev_get_rx_csum(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ return hw->rx_cfg &
+ (DMA_RX_CSUM_UDP |
+ DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+}
+
+/**
+ * netdev_set_rx_csum - set receive checksum support
+ * @dev: Network device.
+ * @data: Zero to disable receive checksum support.
+ *
+ * This function sets receive checksum support setting.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ u32 new_setting = hw->rx_cfg;
+
+ if (data)
+ new_setting |=
+ (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+ else
+ new_setting &=
+ ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
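+ /*
+ * UDP receive checksum offload is always left off below, presumably
+ * because of a hardware limitation.
+ */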
+ new_setting &= ~DMA_RX_CSUM_UDP;
+ mutex_lock(&hw_priv->lock);
+ if (new_setting != hw->rx_cfg) {
+ hw->rx_cfg = new_setting;
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ }
+ mutex_unlock(&hw_priv->lock);
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_regs_len = netdev_get_regs_len,
+ .get_regs = netdev_get_regs,
+ .get_wol = netdev_get_wol,
+ .set_wol = netdev_set_wol,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_eeprom_len = netdev_get_eeprom_len,
+ .get_eeprom = netdev_get_eeprom,
+ .set_eeprom = netdev_set_eeprom,
+ .get_pauseparam = netdev_get_pauseparam,
+ .set_pauseparam = netdev_set_pauseparam,
+ .get_ringparam = netdev_get_ringparam,
+ .get_strings = netdev_get_strings,
+ .get_sset_count = netdev_get_sset_count,
+ .get_ethtool_stats = netdev_get_ethtool_stats,
+ .get_rx_csum = netdev_get_rx_csum,
+ .set_rx_csum = netdev_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+};
+
+/*
+ * Hardware monitoring
+ */
+
+static void update_link(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_port *port)
+{
+ if (priv->media_state != port->linked->state) {
+ priv->media_state = port->linked->state;
+ if (netif_running(dev)) {
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+ }
+ }
+}
+
+static void mib_read_work(struct work_struct *work)
+{
+ struct dev_info *hw_priv =
+ container_of(work, struct dev_info, mib_read);
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port_mib *mib;
+ int i;
+
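+ /*
+ * counter[i].read works as a small state machine: 1 means a read was
+ * requested, 2 means the read finished and any waiters are woken up.
+ */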
+ next_jiffies = jiffies;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ mib = &hw->port_mib[i];
+
+ /* Reading MIB counters or requested to read. */
+ if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
+
+ /* Need to process receive interrupt. */
+ if (port_r_cnt(hw, i))
+ break;
+ hw_priv->counter[i].read = 0;
+
+ /* Finish reading counters. */
+ if (0 == mib->cnt_ptr) {
+ hw_priv->counter[i].read = 2;
+ wake_up_interruptible(
+ &hw_priv->counter[i].counter);
+ }
+ } else if (jiffies >= hw_priv->counter[i].time) {
+ /* Only read MIB counters when the port is connected. */
+ if (media_connected == mib->state)
+ hw_priv->counter[i].read = 1;
+ next_jiffies += HZ * 1 * hw->mib_port_cnt;
+ hw_priv->counter[i].time = next_jiffies;
+
+ /* Port is just disconnected. */
+ } else if (mib->link_down) {
+ mib->link_down = 0;
+
+ /* Read counters one last time after link is lost. */
+ hw_priv->counter[i].read = 1;
+ }
+ }
+}
+
+static void mib_monitor(unsigned long ptr)
+{
+ struct dev_info *hw_priv = (struct dev_info *) ptr;
+
+ mib_read_work(&hw_priv->mib_read);
+
+ /* This is used to verify Wake-on-LAN is working. */
+ if (hw_priv->pme_wait) {
+ if (hw_priv->pme_wait <= jiffies) {
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ hw_priv->pme_wait = 0;
+ }
+ } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
+
+ /* PME is asserted. Wait 2 seconds to clear it. */
+ hw_priv->pme_wait = jiffies + HZ * 2;
+ }
+
+ ksz_update_timer(&hw_priv->mib_timer_info);
+}
+
+/**
+ * dev_monitor - periodic monitoring
+ * @ptr: Network device pointer.
+ *
+ * This routine is run in a kernel timer to monitor the network device.
+ */
+static void dev_monitor(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ if (!(hw->features & LINK_INT_WORKING))
+ port_get_link_speed(port);
+ update_link(dev, priv, port);
+
+ ksz_update_timer(&priv->monitor_timer_info);
+}
+
+/*
+ * Linux network device interface functions
+ */
+
+/* Driver exported variables */
+
+static int msg_enable;
+
+static char *macaddr = ":";
+static char *mac1addr = ":";
+
+/*
+ * This enables multiple network device mode for KSZ8842, which contains a
+ * switch with two physical ports. Some users like to take control of the
+ * ports for running Spanning Tree Protocol. The driver will create an
+ * additional eth? device for the other port.
+ *
+ * One limitation is that the network devices cannot use different MTUs or
+ * multicast hash tables.
+ */
+static int multi_dev;
+
+/*
+ * As most users select multiple network device mode to use Spanning Tree
+ * Protocol, this enables a feature in which most unicast and multicast packets
+ * are forwarded inside the switch and not passed to the host. Only packets
+ * that need the host's attention are passed to it. This prevents the host
+ * from wasting CPU time examining each and every incoming packet and doing
+ * the forwarding itself.
+ *
+ * As the hack requires the private bridge header, the driver cannot compile
+ * with just the kernel headers.
+ *
+ * Enabling STP support also turns on multiple network device mode.
+ */
+static int stp;
+
+/*
+ * This enables fast aging in the KSZ8842 switch. It is not clear which
+ * situations need it; however, fast aging is used to flush the dynamic MAC
+ * table when STP support is enabled.
+ */
+static int fast_aging;
+
+/**
+ * netdev_init - initialize network device.
+ * @dev: Network device.
+ *
+ * This function initializes the network device.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int __init netdev_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
+ dev_monitor, dev);
+
+ /* 500 ms timeout */
+ dev->watchdog_timeo = HZ / 2;
+
+ dev->features |= NETIF_F_IP_CSUM;
+
+ /*
+ * The hardware does not really support IPv6 checksum generation, but the
+ * driver actually runs faster with this flag on. Refer to IPV6_CSUM_GEN_HACK.
+ */
+ dev->features |= NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_SG;
+
+ sema_init(&priv->proc_sem, 1);
+
+ priv->mii_if.phy_id_mask = 0x1;
+ priv->mii_if.reg_num_mask = 0x7;
+ priv->mii_if.dev = dev;
+ priv->mii_if.mdio_read = mdio_read;
+ priv->mii_if.mdio_write = mdio_write;
+ priv->mii_if.phy_id = priv->port.first_port + 1;
+
+ priv->msg_enable = netif_msg_init(msg_enable,
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_init = netdev_init,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_get_stats = netdev_query_statistics,
+ .ndo_start_xmit = netdev_tx,
+ .ndo_tx_timeout = netdev_tx_timeout,
+ .ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_set_rx_mode = netdev_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netdev_netpoll,
+#endif
+};
+
+static void netdev_free(struct net_device *dev)
+{
+ if (dev->watchdog_timeo)
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+struct platform_info {
+ struct dev_info dev_info;
+ struct net_device *netdev[SWITCH_PORT_NUM];
+};
+
+static int net_device_present;
+
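+/*
+ * Parse a MAC address supplied as a colon-separated hex string module
+ * parameter and install it as the override address of the given port.
+ */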
+static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
+{
+ int i;
+ int j;
+ int got_num;
+ int num;
+
+ i = j = num = got_num = 0;
+ while (j < MAC_ADDR_LEN) {
+ if (macaddr[i]) {
+ got_num = 1;
+ if ('0' <= macaddr[i] && macaddr[i] <= '9')
+ num = num * 16 + macaddr[i] - '0';
+ else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
+ num = num * 16 + 10 + macaddr[i] - 'A';
+ else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
+ num = num * 16 + 10 + macaddr[i] - 'a';
+ else if (':' == macaddr[i])
+ got_num = 2;
+ else
+ break;
+ } else if (got_num)
+ got_num = 2;
+ else
+ break;
+ if (2 == got_num) {
+ if (MAIN_PORT == port) {
+ hw_priv->hw.override_addr[j++] = (u8) num;
+ hw_priv->hw.override_addr[5] +=
+ hw_priv->hw.id;
+ } else {
+ hw_priv->hw.ksz_switch->other_addr[j++] =
+ (u8) num;
+ hw_priv->hw.ksz_switch->other_addr[5] +=
+ hw_priv->hw.id;
+ }
+ num = got_num = 0;
+ }
+ i++;
+ }
+ if (MAC_ADDR_LEN == j) {
+ if (MAIN_PORT == port)
+ hw_priv->hw.mac_override = 1;
+ }
+}
+
+#define KS884X_DMA_MASK (~0x0UL)
+
+static void read_other_addr(struct ksz_hw *hw)
+{
+ int i;
+ u16 data[3];
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (i = 0; i < 3; i++)
+ data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
+ if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
+ sw->other_addr[5] = (u8) data[0];
+ sw->other_addr[4] = (u8)(data[0] >> 8);
+ sw->other_addr[3] = (u8) data[1];
+ sw->other_addr[2] = (u8)(data[1] >> 8);
+ sw->other_addr[1] = (u8) data[2];
+ sw->other_addr[0] = (u8)(data[2] >> 8);
+ }
+}
+
+#ifndef PCI_VENDOR_ID_MICREL_KS
+#define PCI_VENDOR_ID_MICREL_KS 0x16c6
+#endif
+
+static int __init pcidev_init(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ struct dev_info *hw_priv;
+ struct ksz_hw *hw;
+ struct platform_info *info;
+ struct ksz_port *port;
+ unsigned long reg_base;
+ unsigned long reg_len;
+ int cnt;
+ int i;
+ int mib_port_count;
+ int pi;
+ int port_count;
+ int result;
+ char banner[80];
+ struct ksz_switch *sw = NULL;
+
+ result = pci_enable_device(pdev);
+ if (result)
+ return result;
+
+ result = -ENODEV;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return result;
+
+ reg_base = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
+ return result;
+
+ if (!request_mem_region(reg_base, reg_len, DRV_NAME))
+ return result;
+ pci_set_master(pdev);
+
+ result = -ENOMEM;
+
+ info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!info)
+ goto pcidev_init_dev_err;
+ memset(info, 0, sizeof(struct platform_info));
+
+ hw_priv = &info->dev_info;
+ hw_priv->pdev = pdev;
+
+ hw = &hw_priv->hw;
+
+ hw->io = ioremap(reg_base, reg_len);
+ if (!hw->io)
+ goto pcidev_init_io_err;
+
+ cnt = hw_init(hw);
+ if (!cnt) {
+ if (msg_enable & NETIF_MSG_PROBE)
+ printk(KERN_ALERT "chip not detected\n");
+ result = -ENODEV;
+ goto pcidev_init_alloc_err;
+ }
+
+ sprintf(banner, "%s\n", version);
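+ /*
+ * Patch the detected chip count into the version banner; offset 13 is
+ * assumed to be the last digit of the chip name.
+ */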
+ banner[13] = cnt + '0';
+ ks_info(hw_priv, "%s", banner);
+ ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+
+ /* Assume device is KSZ8841. */
+ hw->dev_count = 1;
+ port_count = 1;
+ mib_port_count = 1;
+ hw->addr_list_size = 0;
+ hw->mib_cnt = PORT_COUNTER_NUM;
+ hw->mib_port_cnt = 1;
+
+ /* KSZ8842 has a switch with multiple ports. */
+ if (2 == cnt) {
+ if (fast_aging)
+ hw->overrides |= FAST_AGING;
+
+ hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
+
+ /* Multiple network device interfaces are required. */
+ if (multi_dev) {
+ hw->dev_count = SWITCH_PORT_NUM;
+ hw->addr_list_size = SWITCH_PORT_NUM - 1;
+ }
+
+ /* Single network device has multiple ports. */
+ if (1 == hw->dev_count) {
+ port_count = SWITCH_PORT_NUM;
+ mib_port_count = SWITCH_PORT_NUM;
+ }
+ hw->mib_port_cnt = TOTAL_PORT_NUM;
+ hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ if (!hw->ksz_switch)
+ goto pcidev_init_alloc_err;
+ memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
+
+ sw = hw->ksz_switch;
+ }
+ for (i = 0; i < hw->mib_port_cnt; i++)
+ hw->port_mib[i].mib_start = 0;
+
+ hw->parent = hw_priv;
+
+ /* Default MTU is 1500. */
+ hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
+
+ if (ksz_alloc_mem(hw_priv))
+ goto pcidev_init_mem_err;
+
+ hw_priv->hw.id = net_device_present;
+
+ spin_lock_init(&hw_priv->hwlock);
+ mutex_init(&hw_priv->lock);
+
+ /* tasklet_init() leaves the tasklets enabled; they stay disabled below until the device is opened. */
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
+
+ /* tasklet_enable will decrement the atomic counter. */
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+
+ for (i = 0; i < TOTAL_PORT_NUM; i++)
+ init_waitqueue_head(&hw_priv->counter[i].counter);
+
+ if (macaddr[0] != ':')
+ get_mac_addr(hw_priv, macaddr, MAIN_PORT);
+
+ /* Read the MAC address and initialize the override address if not already overridden. */
+ hw_read_addr(hw);
+
+ /* Multiple device interfaces mode requires a second MAC address. */
+ if (hw->dev_count > 1) {
+ memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ read_other_addr(hw);
+ if (mac1addr[0] != ':')
+ get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
+ }
+
+ hw_setup(hw);
+ if (hw->ksz_switch)
+ sw_setup(hw);
+ else {
+ hw_priv->wol_support = WOL_SUPPORT;
+ hw_priv->wol_enable = 0;
+ }
+
+ INIT_WORK(&hw_priv->mib_read, mib_read_work);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
+ mib_monitor, hw_priv);
+
+ for (i = 0; i < hw->dev_count; i++) {
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev)
+ goto pcidev_init_reg_err;
+ info->netdev[i] = dev;
+
+ priv = netdev_priv(dev);
+ priv->adapter = hw_priv;
+ priv->id = net_device_present++;
+
+ port = &priv->port;
+ port->port_cnt = port_count;
+ port->mib_port_cnt = mib_port_count;
+ port->first_port = i;
+ port->flow_ctrl = PHY_FLOW_CTRL;
+
+ port->hw = hw;
+ port->linked = &hw->port_info[port->first_port];
+
+ for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
+ hw->port_info[pi].port_id = pi;
+ hw->port_info[pi].pdev = dev;
+ hw->port_info[pi].state = media_disconnected;
+ }
+
+ dev->mem_start = (unsigned long) hw->io;
+ dev->mem_end = dev->mem_start + reg_len - 1;
+ dev->irq = pdev->irq;
+ if (MAIN_PORT == i)
+ memcpy(dev->dev_addr, hw_priv->hw.override_addr,
+ MAC_ADDR_LEN);
+ else {
+ memcpy(dev->dev_addr, sw->other_addr,
+ MAC_ADDR_LEN);
+ if (!memcmp(sw->other_addr, hw->override_addr,
+ MAC_ADDR_LEN))
+ dev->dev_addr[5] += port->first_port;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ if (register_netdev(dev))
+ goto pcidev_init_reg_err;
+ port_set_power_saving(port, true);
+ }
+
+ pci_dev_get(hw_priv->pdev);
+ pci_set_drvdata(pdev, info);
+ return 0;
+
+pcidev_init_reg_err:
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ netdev_free(info->netdev[i]);
+ info->netdev[i] = NULL;
+ }
+ }
+
+pcidev_init_mem_err:
+ ksz_free_mem(hw_priv);
+ kfree(hw->ksz_switch);
+
+pcidev_init_alloc_err:
+ iounmap(hw->io);
+
+pcidev_init_io_err:
+ kfree(info);
+
+pcidev_init_dev_err:
+ release_mem_region(reg_base, reg_len);
+
+ return result;
+}
+
+static void pcidev_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+
+ pci_set_drvdata(pdev, NULL);
+
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ for (i = 0; i < hw_priv->hw.dev_count; i++) {
+ if (info->netdev[i])
+ netdev_free(info->netdev[i]);
+ }
+ if (hw_priv->hw.io)
+ iounmap(hw_priv->hw.io);
+ ksz_free_mem(hw_priv);
+ kfree(hw_priv->hw.ksz_switch);
+ pci_dev_put(hw_priv->pdev);
+ kfree(info);
+}
+
+#ifdef CONFIG_PM
+static int pcidev_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (hw_priv->wol_enable)
+ hw_cfg_wol_pme(hw, 0);
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+ }
+ }
+ return 0;
+}
+
+static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+ }
+ }
+ if (hw_priv->wol_enable) {
+ hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
+ hw_cfg_wol_pme(hw, 1);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+#endif
+
+static char pcidev_name[] = "ksz884xp";
+
+static struct pci_device_id pcidev_table[] = {
+ { PCI_VENDOR_ID_MICREL_KS, 0x8841,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MICREL_KS, 0x8842,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pcidev_table);
+
+static struct pci_driver pci_device_driver = {
+#ifdef CONFIG_PM
+ .suspend = pcidev_suspend,
+ .resume = pcidev_resume,
+#endif
+ .name = pcidev_name,
+ .id_table = pcidev_table,
+ .probe = pcidev_init,
+ .remove = pcidev_exit
+};
+
+static int __init ksz884x_init_module(void)
+{
+ return pci_register_driver(&pci_device_driver);
+}
+
+static void __exit ksz884x_cleanup_module(void)
+{
+ pci_unregister_driver(&pci_device_driver);
+}
+
+module_init(ksz884x_init_module);
+module_exit(ksz884x_cleanup_module);
+
+MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
+module_param(macaddr, charp, 0);
+module_param(mac1addr, charp, 0);
+module_param(fast_aging, int, 0);
+module_param(multi_dev, int, 0);
+module_param(stp, int, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+MODULE_PARM_DESC(mac1addr, "Second MAC address");
+MODULE_PARM_DESC(fast_aging, "Fast aging");
+MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
+MODULE_PARM_DESC(stp, "STP support");
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 8d7d3d4625f6..7b9447646f8a 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1288,7 +1288,7 @@ static void set_multicast_list(struct net_device *dev)
} else {
short multicast_table[4];
int i;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
if(dev->flags&IFF_ALLMULTI)
num_addrs=1;
/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index b77238dbafb8..6eba352c52e0 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -74,7 +74,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b194..973390b82ec2 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -73,7 +73,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -85,6 +84,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gfp.h>
/* DEBUG flags
*/
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
return i;
};
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
- dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
dev->name, dma, (int)sizeof(struct i596_dma),
@@ -1382,31 +1380,32 @@ static void set_multicast_list(struct net_device *dev)
}
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT) {
cnt = MAX_MC_CNT;
printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
cmd = &dma->mc_cmd;
cmd->cmd.command = SWAP16(CmdMulticastList);
- cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- for (dmi = dev->mc_list;
- cnt && dmi != NULL;
- dmi = dmi->next, cnt--, cp += 6) {
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (!cnt--)
+ break;
memcpy(cp, dmi->dmi_addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: Adding address %pM\n",
dev->name, cp));
+ cp += 6;
}
DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 57f25848fe80..56f66f485400 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -907,15 +907,8 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
struct dev_mc_list *dmi;
- for (dmi=dev->mc_list; dmi; dmi=dmi->next)
- {
- u32 crc;
- if (dmi->dmi_addrlen != ETH_ALEN)
- {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(dmi, dev) {
+ u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
@@ -941,7 +934,7 @@ static void do_set_multicast_list(struct net_device *dev)
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
{
memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
+ if (!netdev_mc_empty(dev))
make_mc_bits(ei_local->mcfilter, dev);
}
else
@@ -975,7 +968,7 @@ static void do_set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
else
ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
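
The lib8390 filter above, and the mace/macmace filters later in this series, hash each multicast address with an Ethernet CRC and set one bit in a 64-entry table; the 8390 wants the MSB-first ether_crc() while the MACE parts use ether_crc_le(). A sketch of the hashing step, assuming an 8-byte filter as these chips use:

        #include <linux/crc32.h>        /* ether_crc(), ether_crc_le() */
        #include <linux/if_ether.h>     /* ETH_ALEN */

        static void example_hash_mc_addr(u8 mcfilter[8], const u8 *addr)
        {
                u32 crc = ether_crc(ETH_ALEN, addr);    /* or ether_crc_le() */
                int bit = crc >> 26;            /* top 6 bits pick 1 of 64 buckets */

                mcfilter[bit >> 3] |= 1 << (bit & 7);   /* set that bucket's bit */
        }
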
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 336e7c7a9275..ba617e3cf1bb 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -49,6 +49,7 @@
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
+#include <linux/slab.h>
#include "ll_temac.h"
@@ -134,7 +135,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual addres and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
@@ -224,6 +225,13 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
return 0;
}
+static int netdev_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ return temac_set_mac_address(ndev, addr->sa_data);
+}
+
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -232,7 +240,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start all out war on
@@ -242,10 +250,11 @@ static void temac_set_multicast_list(struct net_device *ndev)
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (ndev->mc_count) {
- struct dev_mc_list *mclist = ndev->mc_list;
- for (i = 0; mclist && i < ndev->mc_count; i++) {
+ } else if (!netdev_mc_empty(ndev)) {
+ struct dev_mc_list *mclist;
+ i = 0;
+ netdev_for_each_mc_addr(mclist, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
@@ -258,7 +267,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
(mclist->dmi_addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
- mclist = mclist->next;
+ i++;
}
} else {
val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
@@ -615,7 +624,7 @@ static void ll_temac_recv(struct net_device *ndev)
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
skb = lp->rx_skb[lp->rx_bd_ci];
- length = cur_p->app4;
+ length = cur_p->app4 & 0x3FFF;
skb_vaddr = virt_to_bus(skb->data);
dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
@@ -768,7 +777,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
- .ndo_set_mac_address = temac_set_mac_address,
+ .ndo_set_mac_address = netdev_set_mac_address,
//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
@@ -938,6 +947,9 @@ static int __devexit temac_of_remove(struct of_device *op)
static struct of_device_id temac_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ { .compatible = "xlnx,xps-ll-temac-2.00.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.02.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.03.a", },
{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
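
Two notes on the ll_temac hunks: the kzalloc now sizes the rx_skb array by its actual element type (a struct sk_buff pointer) rather than by sizeof(struct sk_buff), and .ndo_set_mac_address gains a thin wrapper because the netdev core hands that hook a struct sockaddr, not raw address bytes. A generic sketch of that calling convention (not the temac code itself, which forwards to its own helper):

        #include <linux/etherdevice.h>

        static int example_ndo_set_mac_address(struct net_device *ndev, void *p)
        {
                struct sockaddr *addr = p;      /* what the core actually passes */

                if (!is_valid_ether_addr(addr->sa_data))
                        return -EADDRNOTAVAIL;

                memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
                /* a real driver would also program the hardware filter here */
                return 0;
        }
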
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index da0e462308d5..5ae28c975b38 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/slab.h>
#include <linux/of_mdio.h>
#include "ll_temac.h"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b9fcc9819837..72b7949c91b1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -72,7 +72,8 @@ struct pcpu_lstats {
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats *pcpu_lstats, *lb_stats;
+ struct pcpu_lstats __percpu *pcpu_lstats;
+ struct pcpu_lstats *lb_stats;
int len;
skb_orphan(skb);
@@ -80,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
- pcpu_lstats = dev->ml_priv;
+ pcpu_lstats = (void __percpu __force *)dev->ml_priv;
lb_stats = this_cpu_ptr(pcpu_lstats);
len = skb->len;
@@ -95,14 +96,14 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
static struct net_device_stats *loopback_get_stats(struct net_device *dev)
{
- const struct pcpu_lstats *pcpu_lstats;
+ const struct pcpu_lstats __percpu *pcpu_lstats;
struct net_device_stats *stats = &dev->stats;
unsigned long bytes = 0;
unsigned long packets = 0;
unsigned long drops = 0;
int i;
- pcpu_lstats = dev->ml_priv;
+ pcpu_lstats = (void __percpu __force *)dev->ml_priv;
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
@@ -135,19 +136,20 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- struct pcpu_lstats *lstats;
+ struct pcpu_lstats __percpu *lstats;
lstats = alloc_percpu(struct pcpu_lstats);
if (!lstats)
return -ENOMEM;
- dev->ml_priv = lstats;
+ dev->ml_priv = (void __force *)lstats;
return 0;
}
static void loopback_dev_free(struct net_device *dev)
{
- struct pcpu_lstats *lstats = dev->ml_priv;
+ struct pcpu_lstats __percpu *lstats =
+ (void __percpu __force *)dev->ml_priv;
free_percpu(lstats);
free_netdev(dev);
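
The loopback changes are pure sparse-annotation work: ml_priv is a plain void *, so every conversion to or from the per-CPU pointer needs a __percpu __force cast, while the statistics accesses themselves are unchanged. For reference, a minimal per-CPU counter lifecycle with the annotation in place (all names illustrative):

        #include <linux/percpu.h>
        #include <linux/errno.h>

        struct example_lstats {
                unsigned long packets;
                unsigned long bytes;
        };

        static struct example_lstats __percpu *example_stats;

        static int example_alloc(void)
        {
                example_stats = alloc_percpu(struct example_lstats);
                return example_stats ? 0 : -ENOMEM;
        }

        static void example_count(unsigned int len)
        {
                /* valid without get_cpu() only while preemption or BHs are off */
                struct example_lstats *s = this_cpu_ptr(example_stats);

                s->packets++;
                s->bytes += len;
        }

        static void example_free(void)
        {
                free_percpu(example_stats);
        }
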
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index e20fefc73c8b..3e3cc04defd0 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1253,21 +1253,22 @@ static void set_multicast_list(struct net_device *dev) {
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
- memcpy(cp, dmi,6);
+ netdev_for_each_mc_addr(dmi, dev) {
+ memcpy(cp, dmi->dmi_addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
@@ -1277,7 +1278,8 @@ static void set_multicast_list(struct net_device *dev) {
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
- if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f64..c8e68fde0664 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
/* 2003-12-26: Make sure Asante cards always work. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -26,7 +28,6 @@
#include <linux/ioport.h>
#include <linux/nubus.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -34,31 +35,36 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/hwtest.h>
#include <asm/macints.h>
static char version[] =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
-/* Unfortunately it seems we have to hardcode these for the moment */
-/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
#define DAYNA_8390_BASE 0x80000
#define DAYNA_8390_MEM 0x00000
@@ -80,7 +86,7 @@ enum mac8390_type {
MAC8390_KINETICS,
};
-static const char * cardname[] = {
+static const char *cardname[] = {
"apple",
"asante",
"farallon",
@@ -90,7 +96,7 @@ static const char * cardname[] = {
"kinetics",
};
-static int word16[] = {
+static const int word16[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -101,7 +107,7 @@ static int word16[] = {
};
/* on which cards do we use NuBus resources? */
-static int useresources[] = {
+static const int useresources[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -117,22 +123,22 @@ enum mac8390_access {
ACCESS_16,
};
-extern int mac8390_memtest(struct net_device * dev);
-static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
enum mac8390_type type);
-static int mac8390_open(struct net_device * dev);
-static int mac8390_close(struct net_device * dev);
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
static void mac8390_no_reset(struct net_device *dev);
static void interlan_reset(struct net_device *dev);
/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
-static void sane_block_input(struct net_device * dev, int count,
- struct sk_buff * skb, int ring_offset);
-static void sane_block_output(struct net_device * dev, int count,
- const unsigned char * buf, const int start_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
/* dayna_memcpy to and from card */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +154,8 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +170,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
switch (dev->dr_sw) {
- case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_APPLE_SONIC_NB:
- case NUBUS_DRHW_APPLE_SONIC_LC:
- case NUBUS_DRHW_SONNET:
- return MAC8390_NONE;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ case NUBUS_DRSW_3COM:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_APPLE_SONIC_NB:
+ case NUBUS_DRHW_APPLE_SONIC_LC:
+ case NUBUS_DRHW_SONNET:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_ASANTE_LC:
- return MAC8390_NONE;
- break;
- case NUBUS_DRHW_CABLETRON:
- return MAC8390_CABLETRON;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_ASANTE:
- return MAC8390_ASANTE;
+ case NUBUS_DRSW_APPLE:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_ASANTE_LC:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_TECHWORKS:
- case NUBUS_DRSW_DAYNA2:
- case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
- return MAC8390_CABLETRON;
- else
- return MAC8390_APPLE;
+ case NUBUS_DRHW_CABLETRON:
+ return MAC8390_CABLETRON;
break;
-
- case NUBUS_DRSW_FARALLON:
- return MAC8390_FARALLON;
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_INTERLAN:
- return MAC8390_INTERLAN;
- break;
- default:
- return MAC8390_KINETICS;
- break;
- }
- break;
+ case NUBUS_DRSW_ASANTE:
+ return MAC8390_ASANTE;
+ break;
- case NUBUS_DRSW_DAYNA:
- // These correspond to Dayna Sonic cards
- // which use the macsonic driver
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN )
- return MAC8390_NONE;
- else
- return MAC8390_DAYNA;
+ case NUBUS_DRSW_TECHWORKS:
+ case NUBUS_DRSW_DAYNA2:
+ case NUBUS_DRSW_DAYNA_LC:
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ else
+ return MAC8390_APPLE;
+ break;
+
+ case NUBUS_DRSW_FARALLON:
+ return MAC8390_FARALLON;
+ break;
+
+ case NUBUS_DRSW_KINETICS:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_INTERLAN:
+ return MAC8390_INTERLAN;
+ break;
+ default:
+ return MAC8390_KINETICS;
break;
+ }
+ break;
+
+ case NUBUS_DRSW_DAYNA:
+ /*
+ * These correspond to Dayna Sonic cards
+ * which use the macsonic driver
+ */
+ if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+ dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ return MAC8390_NONE;
+ else
+ return MAC8390_DAYNA;
+ break;
}
return MAC8390_NONE;
}
@@ -237,14 +245,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy((char *)membase, (char *)&outdata, 4);
+ memcpy(membase, &outdata, 4);
/* Now compare them */
if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
- word_memcpy_tocard((char *)membase, (char *)&outdata, 4);
+ word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
- word_memcpy_fromcard((char *)&indata, (char *)membase, 4);
+ word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
@@ -258,7 +266,7 @@ static int __init mac8390_memsize(unsigned long membase)
local_irq_save(flags);
/* Check up to 32K in 4K increments */
for (i = 0; i < 8; i++) {
- volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
/* Unwriteable - we have a fully decoded card and the
RAM end located */
@@ -273,28 +281,127 @@ static int __init mac8390_memsize(unsigned long membase)
/* check for partial decode and wrap */
for (j = 0; j < i; j++) {
- volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
if (*p != (0xA5A0 | j))
break;
- }
- }
+ }
+ }
local_irq_restore(flags);
- /* in any case, we stopped once we tried one block too many,
- or once we reached 32K */
- return i * 0x1000;
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
}
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- volatile unsigned short *i;
- int version_disp = 0;
- struct nubus_dev * ndev = NULL;
+ struct nubus_dev *ndev = NULL;
int err = -ENODEV;
- struct nubus_dir dir;
- struct nubus_dirent ent;
- int offset;
static unsigned int slots;
enum mac8390_type cardtype;
@@ -311,118 +418,19 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
/* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
+ if (slots & (1 << ndev->board->slot))
continue;
- slots |= 1<<ndev->board->slot;
+ slots |= 1 << ndev->board->slot;
- if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
continue;
- if (version_disp == 0) {
- version_disp = 1;
- printk(version);
- }
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
- /* This is getting to be a habit */
- dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
-
- /* Get some Nubus info - we will trust the card's idea
- of where its memory and registers are. */
-
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- printk(KERN_ERR "%s: Unable to get Nubus functional"
- " directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (!mac8390_init(dev, ndev, cardtype))
continue;
- }
-
- /* Get the MAC address */
- if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
- printk(KERN_INFO "%s: Couldn't get MAC address!\n",
- dev->name);
- continue;
- } else {
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
- }
-
- if (useresources[cardtype] == 1) {
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
- printk(KERN_ERR "%s: Memory offset resource"
- " for slot %X not found!\n",
- dev->name, ndev->board->slot);
- continue;
- }
- nubus_get_rsrc_mem(&offset, &ent, 4);
- dev->mem_start = dev->base_addr + offset;
- /* yes, this is how the Apple driver does it */
- dev->base_addr = dev->mem_start + 0x10000;
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
- printk(KERN_INFO "%s: Memory length resource"
- " for slot %X not found"
- ", probing\n",
- dev->name, ndev->board->slot);
- offset = mac8390_memsize(dev->mem_start);
- } else {
- nubus_get_rsrc_mem(&offset, &ent, 4);
- }
- dev->mem_end = dev->mem_start + offset;
- } else {
- switch (cardtype) {
- case MAC8390_KINETICS:
- case MAC8390_DAYNA: /* it's the same */
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_INTERLAN:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_CABLETRON:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_MEM);
- /* The base address is unreadable if 0x00
- * has been written to the command register
- * Reset the chip by writing E8390_NODMA +
- * E8390_PAGE0 + E8390_STOP just to be
- * sure
- */
- i = (void *)dev->base_addr;
- *i = 0x21;
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
-
- default:
- printk(KERN_ERR "Card type %s is"
- " unsupported, sorry\n",
- ndev->board->name);
- continue;
- }
- }
/* Do the nasty 8390 stuff */
if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +466,7 @@ int init_module(void)
dev_mac890[i] = dev;
}
if (!i) {
- printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n");
+ pr_notice("No useable cards found, driver NOT installed.\n");
return -ENODEV;
}
return 0;
@@ -493,22 +501,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
- enum mac8390_type type)
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
{
- static u32 fwrd4_offsets[16]={
+ static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
16, 20, 24, 28,
32, 36, 40, 44,
48, 52, 56, 60
};
- static u32 back4_offsets[16]={
+ static u32 back4_offsets[16] = {
60, 56, 52, 48,
44, 40, 36, 32,
28, 24, 20, 16,
12, 8, 4, 0
};
- static u32 fwrd2_offsets[16]={
+ static u32 fwrd2_offsets[16] = {
0, 2, 4, 6,
8, 10, 12, 14,
16, 18, 20, 22,
@@ -526,47 +535,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Cabletron's TX/RX buffers are backwards */
if (type == MAC8390_CABLETRON) {
- ei_status.tx_start_page = CABLETRON_TX_START_PG;
- ei_status.rx_start_page = CABLETRON_RX_START_PG;
- ei_status.stop_page = CABLETRON_RX_STOP_PG;
- ei_status.rmem_start = dev->mem_start;
- ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
} else {
- ei_status.tx_start_page = WD_START_PG;
- ei_status.rx_start_page = WD_START_PG + TX_PAGES;
- ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- ei_status.rmem_end = dev->mem_end;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
}
/* Fill in model-specific information and functions */
- switch(type) {
+ switch (type) {
case MAC8390_FARALLON:
case MAC8390_APPLE:
- switch(mac8390_testio(dev->mem_start)) {
- case ACCESS_UNKNOWN:
- printk("Don't know how to access card memory!\n");
- return -ENODEV;
- break;
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_info("Don't know how to access card memory!\n");
+ return -ENODEV;
+ break;
- case ACCESS_16:
- /* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- break;
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
- case ACCESS_32:
- /* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- access_bitmode = 1;
- break;
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
}
break;
@@ -608,24 +617,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
ei_status.block_input = &slow_sane_block_input;
ei_status.block_output = &slow_sane_block_output;
ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = fwrd4_offsets;
- break;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
default:
- printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name);
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO
- "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
@@ -633,7 +643,7 @@ static int mac8390_open(struct net_device *dev)
{
__ei_open(dev);
if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
return 0;
@@ -650,72 +660,71 @@ static void mac8390_no_reset(struct net_device *dev)
{
ei_status.txing = 0;
if (ei_debug > 1)
- printk("reset not supported\n");
+ pr_info("reset not supported\n");
return;
}
static void interlan_reset(struct net_device *dev)
{
- unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq));
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
if (ei_debug > 1)
- printk("Need to reset the NS8390 t=%lu...", jiffies);
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
if (ei_debug > 1)
- printk("reset complete\n");
+ pr_cont("reset complete\n");
return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
/* directly from daynaport.c by Alan Cox */
-static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
{
volatile unsigned char *ptr;
- unsigned char *target=to;
- from<<=1; /* word, skip overhead */
- ptr=(unsigned char *)(dev->mem_start+from);
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
/* Leading byte? */
- if (from&2) {
+ if (from & 2) {
*target++ = ptr[-1];
ptr += 2;
count--;
}
- while(count>=2)
- {
+ while (count >= 2) {
*(unsigned short *)target = *(unsigned short volatile *)ptr;
ptr += 4; /* skip cruft */
target += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
+ if (count)
*target = *ptr;
}
-static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned char *src=from;
- to<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+to);
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
/* Leading byte? */
- if (to&2) { /* avoid a byte write (stomps on other data) */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
- while(count>=2)
- {
- *ptr++=*(unsigned short *)src; /* Copy and */
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
src += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
- {
+ if (count) {
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(*src << 8);
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
}
}
@@ -738,11 +747,14 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ memcpy_toio(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -755,16 +767,18 @@ static void sane_block_output(struct net_device *dev, int count,
}
/* dayna block input/output */
-static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
/* Fix endianness */
- hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
}
-static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +786,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
/* Note the offset math is done in card memory space which is word
per long onto our space. */
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +794,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
dayna_memcpy_fromcard(dev, skb->data + semi_count,
ei_status.rmem_start - dev->mem_start,
count);
- }
- else
- {
+ } else {
dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
}
}
-static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -797,40 +809,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
}
/* Cabletron block I/O */
-static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page)
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
-static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, semi_count);
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
(char *)ei_status.rmem_start, count);
- }
- else
- {
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, count);
+ } else {
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base, count);
}
}
-static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -843,10 +854,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
const unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +866,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
const volatile unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
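
Apart from splitting the probe path out into mac8390_init(), the mac8390 changes are logging and style cleanup: a pr_fmt() definition prefixes every message with the module name, bare printk() calls become pr_err()/pr_info()/pr_cont(), and printk_once() replaces the hand-rolled version_disp flag. Roughly (pr_fmt must be defined before the first include that pulls in printk):

        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

        #include <linux/kernel.h>
        #include <linux/module.h>

        static const char version[] = "v0.4 example banner\n";

        static void example_banner(void)
        {
                printk_once(KERN_INFO pr_fmt("%s"), version);   /* printed once */
                pr_info("probing slot %X\n", 0xA);      /* "<modname>: probing slot A" */
        }
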
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 23b633e2ac42..c0876e915eed 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -88,7 +88,6 @@ static char *version =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/nubus.h>
#include <linux/errno.h>
@@ -98,6 +97,7 @@ static char *version =
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -568,9 +568,7 @@ static void set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
{
lp->rx_mode = RX_ALL_ACCEPT;
- }
- else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
- {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* The multicast-accept list is initialized to accept-all, and we
rely on higher-level filtering for now. */
lp->rx_mode = RX_MULTCAST_ACCEPT;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 1d0d4d9ab623..c8a18a6203c8 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -189,18 +189,11 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
+ struct phy_device *phydev;
struct eth_platform_data *pdata;
- int phy_addr;
-
- /* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (bp->mii_bus->phy_map[phy_addr]) {
- phydev = bp->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ int ret;
+ phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
printk (KERN_ERR "%s: no PHY found\n", dev->name);
return -1;
@@ -210,17 +203,13 @@ static int macb_mii_probe(struct net_device *dev)
/* TODO : add pin_irq */
/* attach the mac to the phy */
- if (pdata && pdata->is_rmii) {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ pdata && pdata->is_rmii ?
+ PHY_INTERFACE_MODE_RMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
/* mask with MAC supported features */
@@ -895,15 +884,12 @@ static void macb_sethashtable(struct net_device *dev)
{
struct dev_mc_list *curr;
unsigned long mc_filter[2];
- unsigned int i, bitnr;
+ unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
- if (!curr) break; /* unexpected end of list */
-
+ netdev_for_each_mc_addr(curr, dev) {
bitnr = hash_get_index(curr->dmi_addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -934,7 +920,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, HRB, -1);
macb_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
macb_sethashtable(dev);
cfg |= MACB_BIT(NCFGR_MTI);
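
The macb PHY probe now leans on two phylib helpers: phy_find_first() replaces the open-coded scan of mii_bus->phy_map, and phy_connect_direct() attaches to the phy_device that was already found instead of re-resolving it by bus-id string through phy_connect(). In sketch form (error handling trimmed; names outside the phylib calls are illustrative):

        #include <linux/netdevice.h>
        #include <linux/phy.h>

        static void example_link_change(struct net_device *dev)
        {
                /* adjust MAC config to phydev->speed/duplex/link here */
        }

        static int example_mii_probe(struct net_device *dev, struct mii_bus *bus)
        {
                struct phy_device *phydev = phy_find_first(bus);

                if (!phydev)
                        return -ENODEV;

                /* 0 = no PHY flags; the interface mode depends on board wiring */
                return phy_connect_direct(dev, phydev, &example_link_change, 0,
                                          PHY_INTERFACE_MODE_MII);
        }
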
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index d9fbad386389..962c41d0c8df 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -16,6 +16,7 @@
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
+#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
@@ -206,7 +207,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
mp->port_aaui = port_aaui;
else {
/* Apple Network Server uses the AAUI port */
- if (machine_is_compatible("AAPL,ShinerESB"))
+ if (of_machine_is_compatible("AAPL,ShinerESB"))
mp->port_aaui = 1;
else {
#ifdef CONFIG_MACE_AAUI_PORT
@@ -588,7 +589,7 @@ static void mace_set_multicast(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- int i, j;
+ int i;
u32 crc;
unsigned long flags;
@@ -598,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -606,11 +607,10 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr);
- j = crc >> 26; /* bit number in multicast_filter */
- multicast_filter[j >> 3] |= 1 << (j & 7);
- dmi = dmi->next;
+ i = crc >> 26; /* bit number in multicast_filter */
+ multicast_filter[i >> 3] |= 1 << (i & 7);
}
}
#if 0
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 44f3c2896f20..52e9a51c4c4f 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -30,6 +30,7 @@
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
@@ -39,7 +40,6 @@
#include "mace.h"
static char mac_mace_string[] = "macmace";
-static struct platform_device *mac_mace_device;
#define N_TX_BUFF_ORDER 0
#define N_TX_RING (1 << N_TX_BUFF_ORDER)
@@ -496,7 +496,7 @@ static void mace_set_multicast(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- int i, j;
+ int i;
u32 crc;
u8 maccc;
unsigned long flags;
@@ -509,7 +509,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -518,11 +518,11 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr);
- j = crc >> 26; /* bit number in multicast_filter */
- multicast_filter[j >> 3] |= 1 << (j & 7);
- dmi = dmi->next;
+ /* bit number in multicast_filter */
+ i = crc >> 26;
+ multicast_filter[i >> 3] |= 1 << (i & 7);
}
}
@@ -752,6 +752,7 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
+MODULE_ALIAS("platform:macmace");
static int __devexit mac_mace_device_remove (struct platform_device *pdev)
{
@@ -777,47 +778,22 @@ static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
.remove = __devexit_p(mac_mace_device_remove),
.driver = {
- .name = mac_mace_string,
+ .name = mac_mace_string,
+ .owner = THIS_MODULE,
},
};
static int __init mac_mace_init_module(void)
{
- int err;
-
if (!MACH_IS_MAC)
return -ENODEV;
- if ((err = platform_driver_register(&mac_mace_driver))) {
- printk(KERN_ERR "Driver registration failed\n");
- return err;
- }
-
- mac_mace_device = platform_device_alloc(mac_mace_string, 0);
- if (!mac_mace_device)
- goto out_unregister;
-
- if (platform_device_add(mac_mace_device)) {
- platform_device_put(mac_mace_device);
- mac_mace_device = NULL;
- }
-
- return 0;
-
-out_unregister:
- platform_driver_unregister(&mac_mace_driver);
-
- return -ENOMEM;
+ return platform_driver_register(&mac_mace_driver);
}
static void __exit mac_mace_cleanup_module(void)
{
platform_driver_unregister(&mac_mace_driver);
-
- if (mac_mace_device) {
- platform_device_unregister(mac_mace_device);
- mac_mace_device = NULL;
- }
}
module_init(mac_mace_init_module);
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 875d361fb79d..adb54fe2d82a 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -35,11 +35,11 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/nubus.h>
@@ -50,6 +50,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/bitrev.h>
+#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
@@ -62,7 +63,6 @@
#include <asm/mac_via.h>
static char mac_sonic_string[] = "macsonic";
-static struct platform_device *mac_sonic_device;
#include "sonic.h"
@@ -607,6 +607,7 @@ out:
MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
+MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
@@ -627,44 +628,19 @@ static struct platform_driver mac_sonic_driver = {
.probe = mac_sonic_probe,
.remove = __devexit_p(mac_sonic_device_remove),
.driver = {
- .name = mac_sonic_string,
+ .name = mac_sonic_string,
+ .owner = THIS_MODULE,
},
};
static int __init mac_sonic_init_module(void)
{
- int err;
-
- if ((err = platform_driver_register(&mac_sonic_driver))) {
- printk(KERN_ERR "Driver registration failed\n");
- return err;
- }
-
- mac_sonic_device = platform_device_alloc(mac_sonic_string, 0);
- if (!mac_sonic_device)
- goto out_unregister;
-
- if (platform_device_add(mac_sonic_device)) {
- platform_device_put(mac_sonic_device);
- mac_sonic_device = NULL;
- }
-
- return 0;
-
-out_unregister:
- platform_driver_unregister(&mac_sonic_driver);
-
- return -ENOMEM;
+ return platform_driver_register(&mac_sonic_driver);
}
static void __exit mac_sonic_cleanup_module(void)
{
platform_driver_unregister(&mac_sonic_driver);
-
- if (mac_sonic_device) {
- platform_device_unregister(mac_sonic_device);
- mac_sonic_device = NULL;
- }
}
module_init(mac_sonic_init_module);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b34..40faa368b07a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,31 +39,6 @@ struct macvlan_port {
struct list_head vlans;
};
-/**
- * struct macvlan_rx_stats - MACVLAN percpu rx stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
- * @rx_errors: number of errors
- */
-struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
-};
-
-struct macvlan_dev {
- struct net_device *dev;
- struct list_head list;
- struct hlist_node hlist;
- struct macvlan_port *port;
- struct net_device *lowerdev;
- struct macvlan_rx_stats *rx_stats;
- enum macvlan_mode mode;
-};
-
-
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -118,31 +93,17 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
return 0;
}
-static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast)
-{
- struct macvlan_rx_stats *rx_stats;
-
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
- if (likely(success)) {
- rx_stats->rx_packets++;;
- rx_stats->rx_bytes += len;
- if (multicast)
- rx_stats->multicast++;
- } else {
- rx_stats->rx_errors++;
- }
-}
-static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
+static int macvlan_broadcast_one(struct sk_buff *skb,
+ const struct macvlan_dev *vlan,
const struct ethhdr *eth, bool local)
{
+ struct net_device *dev = vlan->dev;
if (!skb)
return NET_RX_DROP;
if (local)
- return dev_forward_skb(dev, skb);
+ return vlan->forward(dev, skb);
skb->dev = dev;
if (!compare_ether_addr_64bits(eth->h_dest,
@@ -151,7 +112,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return vlan->receive(skb);
}
static void macvlan_broadcast(struct sk_buff *skb,
@@ -175,7 +136,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
continue;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan->dev, eth,
+ err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
@@ -238,7 +199,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- netif_rx(skb);
+ vlan->receive(skb);
return NULL;
}
@@ -260,7 +221,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
unsigned int length = skb->len + ETH_HLEN;
- int ret = dev_forward_skb(dest->dev, skb);
+ int ret = dest->forward(dest->dev, skb);
macvlan_count_rx(dest, length,
ret == NET_RX_SUCCESS, 0);
@@ -269,12 +230,12 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
- skb->dev = vlan->lowerdev;
+ skb_set_dev(skb, vlan->lowerdev);
return dev_queue_xmit(skb);
}
-static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -290,6 +251,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
return ret;
}
+EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -418,7 +380,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -623,8 +585,11 @@ static int macvlan_get_tx_queues(struct net *net,
return 0;
}
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ int (*receive)(struct sk_buff *skb),
+ int (*forward)(struct net_device *dev,
+ struct sk_buff *skb))
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -664,6 +629,8 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
+ vlan->receive = receive;
+ vlan->forward = forward;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
@@ -677,8 +644,17 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
}
+EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static void macvlan_dellink(struct net_device *dev, struct list_head *head)
+static int macvlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ netif_rx,
+ dev_forward_skb);
+}
+
+void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port = vlan->port;
@@ -689,6 +665,7 @@ static void macvlan_dellink(struct net_device *dev, struct list_head *head)
if (list_empty(&port->vlans))
macvlan_port_destroy(port->dev);
}
+EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -720,19 +697,27 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
};
-static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
+int macvlan_link_register(struct rtnl_link_ops *ops)
+{
+ /* common fields */
+ ops->priv_size = sizeof(struct macvlan_dev);
+ ops->get_tx_queues = macvlan_get_tx_queues;
+ ops->setup = macvlan_setup;
+ ops->validate = macvlan_validate;
+ ops->maxtype = IFLA_MACVLAN_MAX;
+ ops->policy = macvlan_policy;
+ ops->changelink = macvlan_changelink;
+ ops->get_size = macvlan_get_size;
+ ops->fill_info = macvlan_fill_info;
+
+ return rtnl_link_register(ops);
+};
+EXPORT_SYMBOL_GPL(macvlan_link_register);
+
+static struct rtnl_link_ops macvlan_link_ops = {
.kind = "macvlan",
- .priv_size = sizeof(struct macvlan_dev),
- .get_tx_queues = macvlan_get_tx_queues,
- .setup = macvlan_setup,
- .validate = macvlan_validate,
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
- .maxtype = IFLA_MACVLAN_MAX,
- .policy = macvlan_policy,
- .changelink = macvlan_changelink,
- .get_size = macvlan_get_size,
- .fill_info = macvlan_fill_info,
};
static int macvlan_device_event(struct notifier_block *unused,
@@ -761,7 +746,7 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_UNREGISTER:
list_for_each_entry_safe(vlan, next, &port->vlans, list)
- macvlan_dellink(vlan->dev, NULL);
+ vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
}
return NOTIFY_DONE;
@@ -778,7 +763,7 @@ static int __init macvlan_init_module(void)
register_netdevice_notifier(&macvlan_notifier_block);
macvlan_handle_frame_hook = macvlan_handle_frame;
- err = rtnl_link_register(&macvlan_link_ops);
+ err = macvlan_link_register(&macvlan_link_ops);
if (err < 0)
goto err1;
return 0;
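
The macvlan refactoring above exists to support the new macvtap driver that follows: the per-instance receive and forward paths become function pointers on struct macvlan_dev, newlink setup is split out as macvlan_common_newlink(), and macvlan_link_register() fills the rtnl_link_ops fields that every macvlan-based driver shares (macvlan itself keeps passing netif_rx and dev_forward_skb). A hedged sketch of how a derived driver hooks in; everything except the exported macvlan_* symbols is illustrative:

        #include <linux/init.h>
        #include <linux/netdevice.h>
        #include <linux/if_macvlan.h>
        #include <net/rtnetlink.h>

        static int example_enqueue(struct sk_buff *skb)
        {
                /* a tap-style driver would queue to its character device here */
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        static int example_receive(struct sk_buff *skb)
        {
                return example_enqueue(skb);            /* instead of netif_rx() */
        }

        static int example_forward(struct net_device *dev, struct sk_buff *skb)
        {
                return example_enqueue(skb);            /* instead of dev_forward_skb() */
        }

        static int example_newlink(struct net *src_net, struct net_device *dev,
                                   struct nlattr *tb[], struct nlattr *data[])
        {
                return macvlan_common_newlink(src_net, dev, tb, data,
                                              example_receive, example_forward);
        }

        static struct rtnl_link_ops example_link_ops = {
                .kind           = "example",
                .newlink        = example_newlink,
                .dellink        = macvlan_dellink,
        };

        static int __init example_init(void)
        {
                /* fills priv_size, setup, policy, etc. before registering */
                return macvlan_link_register(&example_link_ops);
        }
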
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
new file mode 100644
index 000000000000..abba3cc81f12
--- /dev/null
+++ b/drivers/net/macvtap.c
@@ -0,0 +1,804 @@
+#include <linux/etherdevice.h>
+#include <linux/if_macvlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+
+/*
+ * A macvtap queue is the central object of this driver, it connects
+ * an open character device to a macvlan interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * macvtap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ * TODO: multiqueue support is currently not implemented, even though
+ * macvtap is basically prepared for that. We will need to add this
+ * here as well as in virtio-net and qemu to get line rate on 10gbit
+ * adapters from a guest.
+ */
+struct macvtap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct macvlan_dev *vlan;
+ struct file *file;
+ unsigned int flags;
+};
+
+static struct proto macvtap_proto = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof (struct macvtap_queue),
+};
+
+/*
+ * Minor number matches netdev->ifindex, so need a potentially
+ * large value. This also makes it possible to split the
+ * tap functionality out again in the future by offering it
+ * from other drivers besides macvtap. As long as every device
+ * only has one tap, the interface numbers assure that the
+ * device nodes are unique.
+ */
+static unsigned int macvtap_major;
+#define MACVTAP_NUM_DEVS 65536
+static struct class *macvtap_class;
+static struct cdev macvtap_cdev;
+
+static const struct proto_ops macvtap_socket_ops;
+
+/*
+ * RCU usage:
+ * The macvtap_queue and the macvlan_dev are loosely coupled, the
+ * pointers from one to the other can only be read while rcu_read_lock
+ * or macvtap_lock is held.
+ *
+ * Both the file and the macvlan_dev hold a reference on the macvtap_queue
+ * through sock_hold(&q->sk). When the macvlan_dev goes away first,
+ * q->vlan becomes inaccessible. When the file gets closed,
+ * macvtap_get_queue() fails.
+ *
+ * There may still be references to the struct sock inside of the
+ * queue from outbound SKBs, but these never reference back to the
+ * file or the dev. The data structure is freed through __sk_free
+ * when both our references and any pending SKBs are gone.
+ */
+static DEFINE_SPINLOCK(macvtap_lock);
+
+/*
+ * Choose the next free queue; for now there is only one.
+ */
+static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ int err = -EBUSY;
+
+ spin_lock(&macvtap_lock);
+ if (rcu_dereference(vlan->tap))
+ goto out;
+
+ err = 0;
+ rcu_assign_pointer(q->vlan, vlan);
+ rcu_assign_pointer(vlan->tap, q);
+ sock_hold(&q->sk);
+
+ q->file = file;
+ file->private_data = q;
+
+out:
+ spin_unlock(&macvtap_lock);
+ return err;
+}
+
+/*
+ * The file owning the queue got closed, give up both
+ * the reference that the file holds as well as the
+ * one from the macvlan_dev if that still exists.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ */
+static void macvtap_put_queue(struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan;
+
+ spin_lock(&macvtap_lock);
+ vlan = rcu_dereference(q->vlan);
+ if (vlan) {
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ sock_put(&q->sk);
+ }
+
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Since we only support one queue, just dereference the pointer.
+ */
+static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ return rcu_dereference(vlan->tap);
+}
+
+/*
+ * The net_device is going away, give up the reference
+ * that it holds on the queue (all the queues one day)
+ * and safely set the pointer from the queues to NULL.
+ */
+static void macvtap_del_queues(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvtap_queue *q;
+
+ spin_lock(&macvtap_lock);
+ q = rcu_dereference(vlan->tap);
+ if (!q) {
+ spin_unlock(&macvtap_lock);
+ return;
+ }
+
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Forward happens for data that gets sent from one macvlan
+ * endpoint to another one in bridge mode. We just take
+ * the skb and put it into the receive queue.
+ */
+static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+{
+ struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ if (!q)
+ return -ENOLINK;
+
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ return 0;
+}
+
+/*
+ * Receive is for data from the external interface (lowerdev);
+ * in the macvtap case we can treat it the same way as
+ * forward, which macvlan cannot.
+ */
+static int macvtap_receive(struct sk_buff *skb)
+{
+ skb_push(skb, ETH_HLEN);
+ return macvtap_forward(skb->dev, skb);
+}
+
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ err = macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
+ if (err)
+ goto out;
+
+ devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ err = PTR_ERR(classdev);
+ macvtap_del_queues(dev);
+ }
+
+out:
+ return err;
+}
+
+static void macvtap_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ device_destroy(macvtap_class,
+ MKDEV(MAJOR(macvtap_major), dev->ifindex));
+
+ macvtap_del_queues(dev);
+ macvlan_dellink(dev, head);
+}
+
+static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+ .kind = "macvtap",
+ .newlink = macvtap_newlink,
+ .dellink = macvtap_dellink,
+};
+
+
+static void macvtap_sock_write_space(struct sock *sk)
+{
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+}
+
+static int macvtap_open(struct inode *inode, struct file *file)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct macvtap_queue *q;
+ int err;
+
+ err = -ENODEV;
+ if (!dev)
+ goto out;
+
+ /* check if this is a macvtap device */
+ err = -EINVAL;
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ goto out;
+
+ err = -ENOMEM;
+ q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &macvtap_proto);
+ if (!q)
+ goto out;
+
+ init_waitqueue_head(&q->sock.wait);
+ q->sock.type = SOCK_RAW;
+ q->sock.state = SS_CONNECTED;
+ q->sock.file = file;
+ q->sock.ops = &macvtap_socket_ops;
+ sock_init_data(&q->sock, &q->sk);
+ q->sk.sk_write_space = macvtap_sock_write_space;
+ q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+
+ err = macvtap_set_queue(dev, file, q);
+ if (err)
+ sock_put(&q->sk);
+
+out:
+ if (dev)
+ dev_put(dev);
+
+ return err;
+}
+
+static int macvtap_release(struct inode *inode, struct file *file)
+{
+ struct macvtap_queue *q = file->private_data;
+ macvtap_put_queue(q);
+ return 0;
+}
+
+static unsigned int macvtap_poll(struct file *file, poll_table * wait)
+{
+ struct macvtap_queue *q = file->private_data;
+ unsigned int mask = POLLERR;
+
+ if (!q)
+ goto out;
+
+ mask = 0;
+ poll_wait(file, &q->sock.wait, wait);
+
+ if (!skb_queue_empty(&q->sk.sk_receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+out:
+ return mask;
+}
+
+static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
+ size_t len, size_t linear,
+ int noblock, int *err)
+{
+ struct sk_buff *skb;
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
+
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ err);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, prepad);
+ skb_put(skb, linear);
+ skb->data_len = len - linear;
+ skb->len += len - linear;
+
+ return skb;
+}
+
+/*
+ * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
+ * be shared with the tun/tap driver.
+ */
+static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ unsigned short gso_type = 0;
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ gso_type |= SKB_GSO_TCP_ECN;
+
+ if (vnet_hdr->gso_size == 0)
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
+ vnet_hdr->csum_offset))
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
+ skb_shinfo(skb)->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+ }
+ return 0;
+}
+
+static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ memset(vnet_hdr, 0, sizeof(*vnet_hdr));
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ /* This is a hint as to how much should be linear. */
+ vnet_hdr->hdr_len = skb_headlen(skb);
+ vnet_hdr->gso_size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ } else
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vnet_hdr->csum_start = skb->csum_start -
+ skb_headroom(skb);
+ vnet_hdr->csum_offset = skb->csum_offset;
+ } /* else everything is zero */
+
+ return 0;
+}
+
+
+/* Get packet from user space buffer */
+static ssize_t macvtap_get_user(struct macvtap_queue *q,
+ const struct iovec *iv, size_t count,
+ int noblock)
+{
+ struct sk_buff *skb;
+ struct macvlan_dev *vlan;
+ size_t len = count;
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+ err = -EINVAL;
+ if ((len -= vnet_hdr_len) < 0)
+ goto err;
+
+ err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
+ vnet_hdr_len);
+ if (err < 0)
+ goto err;
+ if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
+ vnet_hdr.hdr_len)
+ vnet_hdr.hdr_len = vnet_hdr.csum_start +
+ vnet_hdr.csum_offset + 2;
+ err = -EINVAL;
+ if (vnet_hdr.hdr_len > len)
+ goto err;
+ }
+
+ err = -EINVAL;
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
+ noblock, &err);
+ if (!skb)
+ goto err;
+
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+ if (err)
+ goto err_kfree;
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ if (vnet_hdr_len) {
+ err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
+ if (err)
+ goto err_kfree;
+ }
+
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
+ kfree_skb(skb);
+ rcu_read_unlock_bh();
+
+ return count;
+
+err_kfree:
+ kfree_skb(skb);
+
+err:
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ rcu_read_unlock_bh();
+
+ return err;
+}
+
+static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t result = -ENOLINK;
+ struct macvtap_queue *q = file->private_data;
+
+ result = macvtap_get_user(q, iv, iov_length(iv, count),
+ file->f_flags & O_NONBLOCK);
+ return result;
+}
+
+/* Put packet to the user space buffer */
+static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ const struct iovec *iv, int len)
+{
+ struct macvlan_dev *vlan;
+ int ret;
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+ vnet_hdr_len = sizeof (vnet_hdr);
+ if ((len -= vnet_hdr_len) < 0)
+ return -EINVAL;
+
+ ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
+ if (ret)
+ return ret;
+
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ return -EFAULT;
+ }
+
+ len = min_t(int, skb->len, len);
+
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_count_rx(vlan, len, ret == 0, 0);
+ rcu_read_unlock_bh();
+
+ return ret ? ret : (len + vnet_hdr_len);
+}
+
+static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+ const struct iovec *iv, unsigned long len,
+ int noblock)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb;
+ ssize_t ret = 0;
+
+ add_wait_queue(q->sk.sk_sleep, &wait);
+ while (len) {
+ current->state = TASK_INTERRUPTIBLE;
+
+ /* Read frames from the queue */
+ skb = skb_dequeue(&q->sk.sk_receive_queue);
+ if (!skb) {
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ continue;
+ }
+ ret = macvtap_put_user(q, skb, iv, len);
+ kfree_skb(skb);
+ break;
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(q->sk.sk_sleep, &wait);
+ return ret;
+}
+
+static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct macvtap_queue *q = file->private_data;
+ ssize_t len, ret = 0;
+
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+out:
+ return ret;
+}
+
+/*
+ * provide compatibility with generic tun/tap interface
+ */
+static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct macvtap_queue *q = file->private_data;
+ struct macvlan_dev *vlan;
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ unsigned int u;
+ int ret;
+
+ switch (cmd) {
+ case TUNSETIFF:
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+
+ ret = 0;
+ if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+ ret = -EINVAL;
+ else
+ q->flags = u;
+
+ return ret;
+
+ case TUNGETIFF:
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ dev_hold(vlan->dev);
+ rcu_read_unlock_bh();
+
+ if (!vlan)
+ return -ENOLINK;
+
+ ret = 0;
+ if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ put_user(q->flags, &ifr->ifr_flags))
+ ret = -EFAULT;
+ dev_put(vlan->dev);
+ return ret;
+
+ case TUNGETFEATURES:
+ if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ if (get_user(u, up))
+ return -EFAULT;
+
+ q->sk.sk_sndbuf = u;
+ return 0;
+
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ /* TODO: only accept frames with the features that
+ got enabled for forwarded frames */
+ if (!(q->flags & IFF_VNET_HDR))
+ return -EINVAL;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations macvtap_fops = {
+ .owner = THIS_MODULE,
+ .open = macvtap_open,
+ .release = macvtap_release,
+ .aio_read = macvtap_aio_read,
+ .aio_write = macvtap_aio_write,
+ .poll = macvtap_poll,
+ .llseek = no_llseek,
+ .unlocked_ioctl = macvtap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = macvtap_compat_ioctl,
+#endif
+};
+
+static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ return macvtap_get_user(q, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops macvtap_socket_ops = {
+ .sendmsg = macvtap_sendmsg,
+ .recvmsg = macvtap_recvmsg,
+};
+
+/* Get an underlying socket object from a macvtap file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *macvtap_get_socket(struct file *file)
+{
+ struct macvtap_queue *q;
+ if (file->f_op != &macvtap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->sock;
+}
+EXPORT_SYMBOL_GPL(macvtap_get_socket);
+
+static int macvtap_init(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&macvtap_major, 0,
+ MACVTAP_NUM_DEVS, "macvtap");
+ if (err)
+ goto out1;
+
+ cdev_init(&macvtap_cdev, &macvtap_fops);
+ err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
+ if (err)
+ goto out2;
+
+ macvtap_class = class_create(THIS_MODULE, "macvtap");
+ if (IS_ERR(macvtap_class)) {
+ err = PTR_ERR(macvtap_class);
+ goto out3;
+ }
+
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out4;
+
+ return 0;
+
+out4:
+ class_unregister(macvtap_class);
+out3:
+ cdev_del(&macvtap_cdev);
+out2:
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+out1:
+ return err;
+}
+module_init(macvtap_init);
+
+static void macvtap_exit(void)
+{
+ rtnl_link_unregister(&macvtap_link_ops);
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+}
+module_exit(macvtap_exit);
+
+MODULE_ALIAS_RTNL_LINK("macvtap");
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_LICENSE("GPL");
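The tun-compatible ioctls and the read path above are what userspace sees through the tap<ifindex> character device that macvtap_newlink() registers. A minimal userspace sketch of driving that device follows, assuming a macvtap link already exists and its node is exposed as /dev/tap<ifindex>; the index 5, the node path and the 2048-byte buffer are illustrative assumptions, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	char frame[2048];
	ssize_t n;
	int fd;

	/* /dev/tap5 is an assumed node name for a macvtap link with ifindex 5 */
	fd = open("/dev/tap5", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* TUNGETIFF reports the bound device name and the current flags
	 * (IFF_TAP | IFF_NO_PI | IFF_VNET_HDR right after open) */
	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, TUNGETIFF, &ifr) < 0) {
		perror("TUNGETIFF");
		return 1;
	}
	printf("bound to %s, flags 0x%x\n", ifr.ifr_name, ifr.ifr_flags);

	/* Drop the virtio_net_hdr prefix: TUNSETIFF ignores the name and
	 * only accepts IFF_TAP | IFF_NO_PI, optionally with IFF_VNET_HDR */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}

	/* each read() returns one ethernet frame queued by macvtap_forward() */
	n = read(fd, frame, sizeof(frame));
	if (n > 0)
		printf("got %zd byte frame\n", n);

	close(fd);
	return 0;
}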
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386b..9f72cb45f4af 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
static const char *meth_str="SGI O2 Fast Ethernet";
-#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)
-#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
-#endif
/*
* This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 65ec77dc31f5..23cee7b6af91 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -33,6 +33,7 @@
*/
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/errno.h>
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index ccfe276943f0..7cd34e9c7c7e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -35,6 +35,7 @@
*/
#include <linux/hardirq.h>
+#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 507e11fce9ed..cbabf14f95d0 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index c48b0f4b17b7..73c3d20c6453 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -35,6 +35,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
index 16256784a943..0dfb4ec8a9dd 100644
--- a/drivers/net/mlx4/en_resources.c
+++ b/drivers/net/mlx4/en_resources.c
@@ -31,6 +31,7 @@
*
*/
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 829b9ec9ff67..8e2fcb7103c3 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -32,6 +32,7 @@
*/
#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
@@ -508,11 +509,11 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
- length, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
- length, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
skb->tail += length;
} else {
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 3d1396af9462..580968f304eb 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index bffb7995cb70..7365bf488b81 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -32,6 +32,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 04b382fcb8c8..57288ca1395f 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 0e7eb1038f9f..555067802751 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/slab.h>
+
#include "mlx4.h"
struct mlx4_device_context {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 291a505fd4fc..e3e0d54a7c87 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -1023,6 +1024,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
info->port_attr.show = show_port_type;
info->port_attr.store = set_port_type;
+ sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
if (err) {
@@ -1174,7 +1176,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_port:
- for (port = 1; port <= dev->caps.num_ports; port++)
+ for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_mcg_table(dev);
@@ -1271,7 +1273,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
return __mlx4_init_one(pdev, NULL);
}
-static struct pci_device_id mlx4_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 5ccbce9866fe..c4f88b7ef7b6 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,7 +32,6 @@
*/
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index ca7ab8e7b4cc..3dc69be4949f 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index ca25b9dc8378..5caf0115fa5b 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -32,6 +32,8 @@
* SOFTWARE.
*/
+#include <linux/slab.h>
+
#include "mlx4.h"
#include "fw.h"
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 42ab9fc01d3e..ec9350e5f21a 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -33,6 +33,7 @@
* SOFTWARE.
*/
+#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 1377d0dc8f1f..3b07b80a0456 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -32,6 +32,7 @@
*/
#include <linux/mlx4/cmd.h>
+#include <linux/gfp.h>
#include "mlx4.h"
#include "icm.h"
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1405a170bb43..8613a52ddf17 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -54,8 +54,8 @@
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
+#include <linux/slab.h>
#include <asm/system.h>
-#include <linux/list.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -656,6 +656,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct sk_buff *skb;
int rx;
struct rx_desc *rx_desc;
+ int size;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
@@ -678,10 +679,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
+ size = skb->end - skb->data;
rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
- skb->data, mp->skb_size,
+ skb->data, size,
DMA_FROM_DEVICE);
- rx_desc->buf_size = mp->skb_size;
+ rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
@@ -1695,7 +1697,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
@@ -1793,7 +1795,7 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
+ netdev_for_each_mc_addr(addr, dev) {
u8 *a = addr->da_addr;
u32 *table;
int entry;
@@ -2845,6 +2847,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_start_xmit = mv643xx_eth_xmit,
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 93c709d63e2f..3a7ad840d5b5 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -10,11 +10,11 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/gfp.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/route.h>
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0d..ecde0876a785 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -38,6 +38,8 @@
* Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -62,6 +64,7 @@
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
+#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
@@ -75,7 +78,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.5.1-1.453"
+#define MYRI10GE_VERSION_STR "1.5.2-1.459"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -819,9 +822,7 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
- printk(KERN_ERR
- "myri10ge: %s: Failed to set flow control mode\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "Failed to set flow control mode\n");
return status;
}
mgp->pause = pause;
@@ -837,8 +838,7 @@ myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
- printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
@@ -1201,6 +1201,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
{
struct page *page;
int idx;
+#if MYRI10GE_ALLOC_SIZE > 4096
+ int end_offset;
+#endif
if (unlikely(rx->watchdog_needed && !watchdog))
return;
@@ -1242,9 +1245,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
#if MYRI10GE_ALLOC_SIZE > 4096
/* don't cross a 4KB boundary */
- if ((rx->page_offset >> 12) !=
- ((rx->page_offset + bytes - 1) >> 12))
- rx->page_offset = (rx->page_offset + 4096) & ~4095;
+ end_offset = rx->page_offset + bytes - 1;
+ if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
+ rx->page_offset = end_offset & ~4095;
#endif
rx->fill_cnt++;
@@ -1482,19 +1485,15 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
if (mgp->link_state == MXGEFW_LINK_UP) {
if (netif_msg_link(mgp))
- printk(KERN_INFO
- "myri10ge: %s: link up\n",
- mgp->dev->name);
+ netdev_info(mgp->dev, "link up\n");
netif_carrier_on(mgp->dev);
mgp->link_changes++;
} else {
if (netif_msg_link(mgp))
- printk(KERN_INFO
- "myri10ge: %s: link %s\n",
- mgp->dev->name,
- (link_up == MXGEFW_LINK_MYRINET ?
- "mismatch (Myrinet detected)" :
- "down"));
+ netdev_info(mgp->dev, "link %s\n",
+ link_up == MXGEFW_LINK_MYRINET ?
+ "mismatch (Myrinet detected)" :
+ "down");
netif_carrier_off(mgp->dev);
mgp->link_changes++;
}
@@ -1503,9 +1502,8 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
ntohl(stats->rdma_tags_available)) {
mgp->rdma_tags_available =
ntohl(stats->rdma_tags_available);
- printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
- "%d tags left\n", mgp->dev->name,
- mgp->rdma_tags_available);
+ netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
+ mgp->rdma_tags_available);
}
mgp->down_cnt += stats->link_down;
if (stats->link_down)
@@ -1576,8 +1574,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
if (send_done_count != tx->pkt_done)
myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
- printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "irq stuck?\n");
stats->valid = 0;
schedule_work(&mgp->watchdog_work);
}
@@ -1614,16 +1611,14 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
*/
ptr = mgp->product_code_string;
if (ptr == NULL) {
- printk(KERN_ERR "myri10ge: %s: Missing product code\n",
- netdev->name);
+ netdev_err(netdev, "Missing product code\n");
return 0;
}
for (i = 0; i < 3; i++, ptr++) {
ptr = strchr(ptr, '-');
if (ptr == NULL) {
- printk(KERN_ERR "myri10ge: %s: Invalid product "
- "code %s\n", netdev->name,
- mgp->product_code_string);
+ netdev_err(netdev, "Invalid product code %s\n",
+ mgp->product_code_string);
return 0;
}
}
@@ -1695,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev,
if (pause->tx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->tx_pause);
if (pause->rx_pause != mgp->pause)
- return myri10ge_change_pause(mgp, pause->tx_pause);
+ return myri10ge_change_pause(mgp, pause->rx_pause);
if (pause->autoneg != 0)
return -EINVAL;
return 0;
@@ -2009,17 +2004,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
- printk(KERN_ERR
- "myri10ge: %s:slice-%d: alloced only %d small bufs\n",
- dev->name, slice, ss->rx_small.fill_cnt);
+ netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
+ slice, ss->rx_small.fill_cnt);
goto abort_with_rx_small_ring;
}
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
- printk(KERN_ERR
- "myri10ge: %s:slice-%d: alloced only %d big bufs\n",
- dev->name, slice, ss->rx_big.fill_cnt);
+ netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
+ slice, ss->rx_big.fill_cnt);
goto abort_with_rx_big_ring;
}
@@ -2358,7 +2351,7 @@ static int myri10ge_open(struct net_device *dev)
mgp->running = MYRI10GE_ETH_STARTING;
status = myri10ge_reset(mgp);
if (status != 0) {
- printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
+ netdev_err(dev, "failed reset\n");
goto abort_with_nothing;
}
@@ -2370,9 +2363,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to set number of slices\n",
- dev->name);
+ netdev_err(dev, "failed to set number of slices\n");
goto abort_with_nothing;
}
/* setup the indirection table */
@@ -2384,9 +2375,7 @@ static int myri10ge_open(struct net_device *dev)
MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to setup rss tables\n",
- dev->name);
+ netdev_err(dev, "failed to setup rss tables\n");
goto abort_with_nothing;
}
@@ -2400,9 +2389,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to enable slices\n",
- dev->name);
+ netdev_err(dev, "failed to enable slices\n");
goto abort_with_nothing;
}
}
@@ -2450,9 +2437,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_get_txrx(mgp, slice);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to get ring sizes or locations\n",
- dev->name);
+ netdev_err(dev, "failed to get ring sizes or locations\n");
goto abort_with_rings;
}
status = myri10ge_allocate_rings(ss);
@@ -2465,9 +2450,7 @@ static int myri10ge_open(struct net_device *dev)
if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
status = myri10ge_set_stats(mgp, slice);
if (status) {
- printk(KERN_ERR
- "myri10ge: %s: Couldn't set stats DMA\n",
- dev->name);
+ netdev_err(dev, "Couldn't set stats DMA\n");
goto abort_with_rings;
}
@@ -2498,8 +2481,7 @@ static int myri10ge_open(struct net_device *dev)
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
if (status) {
- printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n",
- dev->name);
+ netdev_err(dev, "Couldn't set buffer sizes\n");
goto abort_with_rings;
}
@@ -2511,8 +2493,7 @@ static int myri10ge_open(struct net_device *dev)
cmd.data0 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
if (status && status != -ENOSYS) {
- printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n",
- dev->name);
+ netdev_err(dev, "Couldn't set TSO mode\n");
goto abort_with_rings;
}
@@ -2521,8 +2502,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
if (status) {
- printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
- dev->name);
+ netdev_err(dev, "Couldn't bring up link\n");
goto abort_with_rings;
}
@@ -2575,15 +2555,12 @@ static int myri10ge_close(struct net_device *dev)
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
- printk(KERN_ERR
- "myri10ge: %s: Couldn't bring down link\n",
- dev->name);
+ netdev_err(dev, "Couldn't bring down link\n");
wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
HZ);
if (old_down_cnt == mgp->down_cnt)
- printk(KERN_ERR "myri10ge: %s never got down irq\n",
- dev->name);
+ netdev_err(dev, "never got down irq\n");
}
netif_tx_disable(dev);
myri10ge_free_irq(mgp);
@@ -2944,9 +2921,7 @@ abort_linearize:
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
if (skb_is_gso(skb)) {
- printk(KERN_ERR
- "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
goto drop;
}
@@ -3043,8 +3018,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
- " error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
+ err);
goto abort;
}
@@ -3058,14 +3033,13 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
&cmd, 1);
if (err != 0) {
- printk(KERN_ERR
- "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
- ", error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
+ err);
goto abort;
}
/* Walk the multicast list, and add each address */
- for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, dev) {
memcpy(data, &mc_list->dmi_addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
@@ -3073,18 +3047,16 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
&cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed "
- "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
- "%d\t", dev->name, err);
- printk(KERN_ERR "MAC %pM\n", mc_list->dmi_addr);
+ netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
+ err, mc_list->dmi_addr);
goto abort;
}
}
/* Enable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
- "error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
+ err);
goto abort;
}
@@ -3105,9 +3077,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
status = myri10ge_update_mac_address(mgp, sa->sa_data);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: changing mac address failed with %d\n",
- dev->name, status);
+ netdev_err(dev, "changing mac address failed with %d\n",
+ status);
return status;
}
@@ -3122,12 +3093,10 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
int error = 0;
if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
- printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n",
- dev->name, new_mtu);
+ netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
return -EINVAL;
}
- printk(KERN_INFO "%s: changing mtu from %d to %d\n",
- dev->name, dev->mtu, new_mtu);
+ netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
@@ -3356,7 +3325,7 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
if (netif_running(netdev)) {
- printk(KERN_INFO "myri10ge: closing %s\n", netdev->name);
+ netdev_info(netdev, "closing\n");
rtnl_lock();
myri10ge_close(netdev);
rtnl_unlock();
@@ -3383,8 +3352,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
msleep(5); /* give card time to respond */
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
- printk(KERN_ERR "myri10ge: %s: device disappeared!\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device disappeared!\n");
return -EIO;
}
@@ -3463,10 +3431,9 @@ static void myri10ge_watchdog(struct work_struct *work)
* if the card rebooted due to a parity error
* For now, just report it */
reboot = myri10ge_read_reboot(mgp);
- printk(KERN_ERR
- "myri10ge: %s: NIC rebooted (0x%x),%s resetting\n",
- mgp->dev->name, reboot,
- myri10ge_reset_recover ? " " : " not");
+ netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
+ reboot,
+ myri10ge_reset_recover ? "" : " not");
if (myri10ge_reset_recover == 0)
return;
rtnl_lock();
@@ -3494,31 +3461,26 @@ static void myri10ge_watchdog(struct work_struct *work)
if (cmd == 0xffff) {
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
- printk(KERN_ERR
- "myri10ge: %s: device disappeared!\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device disappeared!\n");
return;
}
}
/* Perhaps it is a software error. Try to reset */
- printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device timeout, resetting\n");
for (i = 0; i < mgp->num_slices; i++) {
tx = &mgp->ss[i].tx;
- printk(KERN_INFO
- "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
- mgp->dev->name, i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
+ netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
+ i, tx->queue_active, tx->req,
+ tx->done, tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss[i].fw_stats->
+ send_done_count));
msleep(2000);
- printk(KERN_INFO
- "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
- mgp->dev->name, i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
+ netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
+ i, tx->queue_active, tx->req,
+ tx->done, tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss[i].fw_stats->
+ send_done_count));
}
}
@@ -3528,8 +3490,7 @@ static void myri10ge_watchdog(struct work_struct *work)
}
status = myri10ge_load_firmware(mgp, 1);
if (status != 0)
- printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "failed to load firmware\n");
else
myri10ge_open(mgp->dev);
rtnl_unlock();
@@ -3580,14 +3541,10 @@ static void myri10ge_watchdog_timer(unsigned long arg)
/* nic seems like it might be stuck.. */
if (rx_pause_cnt != mgp->watchdog_pause) {
if (net_ratelimit())
- printk(KERN_WARNING
- "myri10ge %s slice %d:"
- "TX paused, check link partner\n",
- mgp->dev->name, i);
+ netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
+ i);
} else {
- printk(KERN_WARNING
- "myri10ge %s slice %d stuck:",
- mgp->dev->name, i);
+ netdev_warn(mgp->dev, "slice %d stuck:", i);
reset_needed = 1;
}
}
@@ -3731,7 +3688,6 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed reset\n");
goto abort_with_fw;
- return;
}
mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
@@ -4085,7 +4041,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
-static struct pci_device_id myri10ge_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
@@ -4127,13 +4083,11 @@ static struct notifier_block myri10ge_dca_notifier = {
static __init int myri10ge_init_module(void)
{
- printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
- MYRI10GE_VERSION_STR);
+ pr_info("Version %s\n", MYRI10GE_VERSION_STR);
if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
- printk(KERN_ERR
- "%s: Illegal rssh hash type %d, defaulting to source port\n",
- myri10ge_driver.name, myri10ge_rss_hash);
+ pr_err("Illegal rssh hash type %d, defaulting to source port\n",
+ myri10ge_rss_hash);
myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
}
#ifdef CONFIG_MYRI10GE_DCA
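The 4 KB boundary check in myri10ge_alloc_rx_pages() above replaces the shift-based page comparison with an XOR test. A small standalone sketch, with made-up offsets, shows why (page_offset ^ end_offset) > 4095 is true exactly when the two offsets land on different 4 KB pages (some bit at or above bit 12 differs); align_rx_offset() is a hypothetical wrapper for the example, not a function in the driver:

#include <assert.h>

static unsigned int align_rx_offset(unsigned int page_offset, unsigned int bytes)
{
	unsigned int end_offset = page_offset + bytes - 1;

	/* a bit >= 12 differs <=> the buffer would straddle a 4 KB page,
	 * so restart it at the page containing end_offset */
	if ((page_offset ^ end_offset) > 4095)
		page_offset = end_offset & ~4095u;
	return page_offset;
}

int main(void)
{
	/* 0xf00 + 512 bytes ends at 0x10ff: 0xf00 ^ 0x10ff = 0x1fff > 4095,
	 * so the offset is realigned to 0x1000 */
	assert(align_rx_offset(0xf00, 512) == 0x1000);

	/* 0x400 + 512 bytes ends at 0x5ff: 0x400 ^ 0x5ff = 0x1ff <= 4095,
	 * same page, offset unchanged */
	assert(align_rx_offset(0x400, 512) == 0x400);
	return 0;
}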
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b703..b72e749afdf1 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -14,7 +14,6 @@ static char version[] =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -26,6 +25,7 @@ static char version[] =
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/firmware.h>
+#include <linux/gfp.h>
#include <net/dst.h>
#include <net/arp.h>
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
pad[0] = MYRI_PAD_LEN;
pad[1] = 0xab;
- /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
+ * length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce27..e52038783245 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
{ "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
@@ -2488,16 +2488,16 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 992dbfffdb05..f4347f88b6f2 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -142,7 +142,7 @@ bad_clone_list[] __initdata = {
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
#ifdef CONFIG_MACH_TX49XX
- {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
+ {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
#endif
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
{NULL,}
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index a53bb201d3c7..ff3c4c814988 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -66,7 +66,6 @@ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.o
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151c..85aec4f10131 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
};
-static struct pci_device_id ne2k_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
{ 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
{ 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
{ 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index bf4af5248cb7..a361dea35574 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -37,6 +37,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/console.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e4..861a0590b1f4 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
# MA 02111-1307, USA.
#
# The full GNU General Public License is included in this distribution
-# in the file called LICENSE.
+# in the file called "COPYING".
#
#
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 76cd1f3e9fc8..0f703838e21a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 65
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -420,7 +420,7 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0x3eb000
+#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f6878047..f26e54716c88 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
}
sds_ring->desc_head = (struct status_desc *)addr;
- sds_ring->crb_sts_consumer =
- netxen_get_ioaddr(adapter,
- recv_crb_registers[port].crb_sts_consumer[ring]);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ sds_ring->crb_sts_consumer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_sts_consumer[ring]);
- sds_ring->crb_intr_mask =
- netxen_get_ioaddr(adapter,
- recv_crb_registers[port].sw_int_mask[ring]);
+ sds_ring->crb_intr_mask =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].sw_int_mask[ring]);
+ }
}
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index ddd704ae0188..f8499e56cbee 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
-#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_COUNT 30
#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
#define NETXEN_MAX_EEPROM_LEN 1024
@@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
return NETXEN_NIC_REGS_LEN;
}
-struct netxen_niu_regs {
- __u32 reg[NETXEN_NIC_REGS_COUNT];
-};
-
-static struct netxen_niu_regs niu_registers[] = {
- {
- /* GB Mode */
- {
- NETXEN_NIU_GB_SERDES_RESET,
- NETXEN_NIU_GB0_MII_MODE,
- NETXEN_NIU_GB1_MII_MODE,
- NETXEN_NIU_GB2_MII_MODE,
- NETXEN_NIU_GB3_MII_MODE,
- NETXEN_NIU_GB0_GMII_MODE,
- NETXEN_NIU_GB1_GMII_MODE,
- NETXEN_NIU_GB2_GMII_MODE,
- NETXEN_NIU_GB3_GMII_MODE,
- NETXEN_NIU_REMOTE_LOOPBACK,
- NETXEN_NIU_GB0_HALF_DUPLEX,
- NETXEN_NIU_GB1_HALF_DUPLEX,
- NETXEN_NIU_RESET_SYS_FIFOS,
- NETXEN_NIU_GB_CRC_DROP,
- NETXEN_NIU_GB_DROP_WRONGADDR,
- NETXEN_NIU_TEST_MUX_CTL,
-
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- NETXEN_NIU_GB_MAC_CONFIG_1(0),
- NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
- NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
- NETXEN_NIU_GB_TEST_REG(0),
- NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
- NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- NETXEN_NIU_GB_MII_MGMT_CTRL(0),
- NETXEN_NIU_GB_MII_MGMT_STATUS(0),
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- NETXEN_NIU_GB_INTERFACE_CTRL(0),
- NETXEN_NIU_GB_INTERFACE_STATUS(0),
- NETXEN_NIU_GB_STATION_ADDR_0(0),
- NETXEN_NIU_GB_STATION_ADDR_1(0),
- -1,
- }
- },
- {
- /* XG Mode */
- {
- NETXEN_NIU_XG_SINGLE_TERM,
- NETXEN_NIU_XG_DRIVE_HI,
- NETXEN_NIU_XG_DRIVE_LO,
- NETXEN_NIU_XG_DTX,
- NETXEN_NIU_XG_DEQ,
- NETXEN_NIU_XG_WORD_ALIGN,
- NETXEN_NIU_XG_RESET,
- NETXEN_NIU_XG_POWER_DOWN,
- NETXEN_NIU_XG_RESET_PLL,
- NETXEN_NIU_XG_SERDES_LOOPBACK,
- NETXEN_NIU_XG_DO_BYTE_ALIGN,
- NETXEN_NIU_XG_TX_ENABLE,
- NETXEN_NIU_XG_RX_ENABLE,
- NETXEN_NIU_XG_STATUS,
- NETXEN_NIU_XG_PAUSE_THRESHOLD,
- NETXEN_NIU_XGE_CONFIG_0,
- NETXEN_NIU_XGE_CONFIG_1,
- NETXEN_NIU_XGE_IPG,
- NETXEN_NIU_XGE_STATION_ADDR_0_HI,
- NETXEN_NIU_XGE_STATION_ADDR_0_1,
- NETXEN_NIU_XGE_STATION_ADDR_1_LO,
- NETXEN_NIU_XGE_STATUS,
- NETXEN_NIU_XGE_MAX_FRAME_SIZE,
- NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
- NETXEN_NIU_XGE_TX_BYTE_CNT,
- NETXEN_NIU_XGE_TX_FRAME_CNT,
- NETXEN_NIU_XGE_RX_BYTE_CNT,
- NETXEN_NIU_XGE_RX_FRAME_CNT,
- NETXEN_NIU_XGE_AGGR_ERROR_CNT,
- NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
- NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
- NETXEN_NIU_XGE_CRC_ERROR_CNT,
- NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
- NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
- NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
- NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
- -1,
- }
- }
-};
-
static void
netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 mode, *regs_buff = p;
- int i, window;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
memset(p, 0, NETXEN_NIC_REGS_LEN);
+
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
- /* which mode */
- regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
- mode = regs_buff[0];
-
- /* Common registers to all the modes */
- regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
- /* GB/XGB Mode */
- mode = (mode / 2) - 1;
- window = 0;
- if (mode <= 1) {
- for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
- /* GB: port specific registers */
- if (mode == 0 && i >= 19)
- window = adapter->physical_port *
- NETXEN_NIC_PORT_WINDOW;
-
- regs_buff[i] = NXRD32(adapter,
- niu_registers[mode].reg[i - 3] + window);
- }
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
}
}
static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 status;
- int val;
+ u32 val, port;
- /* read which mode */
- if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if (adapter->phy_read &&
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
- else {
- val = netxen_get_phy_link(status);
- return !val;
- }
- } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
return (val == XG_LINK_UP) ? 0 : 1;
}
- return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927a..622e4c8be937 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -969,7 +969,8 @@ enum {
#define NX_DEV_READY 3
#define NX_DEV_NEED_RESET 4
#define NX_DEV_NEED_QUISCENT 5
-#define NX_DEV_FAILED 6
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
#define NX_RCODE_DRIVER_INFO 0x20000000
#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 2e364fee3cbb..b1cf46a0c48c 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,10 +19,11 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
+#include <linux/slab.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -345,8 +346,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
void
netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
{
- int val;
- val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
@@ -540,7 +540,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct dev_mc_list *mc_ptr;
u8 null_addr[6];
- int index = 0;
+ int i;
memset(null_addr, 0, 6);
@@ -555,7 +555,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
return;
}
- if (netdev->mc_count == 0) {
+ if (netdev_mc_empty(netdev)) {
adapter->set_promisc(adapter,
NETXEN_NIU_NON_PROMISC_MODE);
netxen_nic_disable_mcast_filter(adapter);
@@ -564,23 +564,20 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > adapter->max_mc_count) {
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
netxen_nic_disable_mcast_filter(adapter);
return;
}
netxen_nic_enable_mcast_filter(adapter);
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
- netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
-
- if (index != netdev->mc_count)
- printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
- netxen_nic_driver_name, netdev->name);
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
/* Clear out remaining addresses */
- for (; index < adapter->max_mc_count; index++)
- netxen_nic_set_mcast_addr(adapter, index, null_addr);
+ while (i < adapter->max_mc_count)
+ netxen_nic_set_mcast_addr(adapter, i++, null_addr);
}
static int
@@ -691,6 +688,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
struct list_head *head;
nx_mac_list_t *cur;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
@@ -702,16 +702,14 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
- for (mc_ptr = netdev->mc_list; mc_ptr;
- mc_ptr = mc_ptr->next) {
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(mc_ptr, netdev)
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
- }
}
send_fw_cmd:
@@ -775,17 +773,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
nx_nic_req_t req;
- u64 word;
- int rv;
+ u64 word[6];
+ int rv, i;
memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
- word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
- memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
@@ -1031,7 +1032,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
return 0;
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
__le32 *pmac = (__le32 *) mac;
u32 offset;
@@ -1056,7 +1057,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
return 0;
}
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
uint32_t crbaddr, mac_hi, mac_lo;
int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583a..e2c5b6f2df03 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02f8d4b4db63..02876f59cbb2 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,12 +19,13 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -184,6 +185,8 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
@@ -759,7 +762,7 @@ nx_get_bios_version(struct netxen_adapter *adapter)
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ NX_UNI_BIOS_VERSION_OFF));
- return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
(bios_ver >> 24);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
@@ -778,11 +781,14 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
+ if (adapter->need_fw_reset)
+ return 1;
+
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
- old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 6cae26a5bd67..ce838f7c8b0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,10 +19,11 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "netxen_nic_hw.h"
@@ -35,6 +36,7 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
+#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -84,6 +86,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -98,7 +101,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -340,7 +343,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
- first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
@@ -430,7 +433,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
{
int i;
unsigned char *p;
- __le64 mac_addr;
+ u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -602,16 +605,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter)
static int
netxen_setup_pci_map(struct netxen_adapter *adapter)
{
- void __iomem *mem_ptr0 = NULL;
- void __iomem *mem_ptr1 = NULL;
- void __iomem *mem_ptr2 = NULL;
void __iomem *db_ptr = NULL;
resource_size_t mem_base, db_base;
- unsigned long mem_len, db_len = 0, pci_len0 = 0;
+ unsigned long mem_len, db_len = 0;
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
+ struct netxen_hardware_context *ahw = &adapter->ahw;
int err = 0;
@@ -628,24 +629,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
/* 128 Meg of memory */
if (mem_len == NETXEN_PCI_128MB_SIZE) {
- mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
- mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+
+ ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
SECOND_PAGE_GROUP_SIZE);
- mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
THIRD_PAGE_GROUP_SIZE);
- pci_len0 = FIRST_PAGE_GROUP_SIZE;
+ if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+ ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
- mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
- mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+
+ ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
- mem_ptr0 = pci_ioremap_bar(pdev, 0);
- if (mem_ptr0 == NULL) {
+ ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+ if (ahw->pci_base0 == NULL) {
dev_err(&pdev->dev, "failed to map PCI bar 0\n");
return -EIO;
}
- pci_len0 = mem_len;
+ ahw->pci_len0 = mem_len;
} else {
return -EIO;
}
@@ -654,11 +671,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
- adapter->ahw.pci_base0 = mem_ptr0;
- adapter->ahw.pci_len0 = pci_len0;
- adapter->ahw.pci_base1 = mem_ptr1;
- adapter->ahw.pci_base2 = mem_ptr2;
-
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@ -1244,8 +1256,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
- if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
- pr_warning("%s: chip revisions between 0x%x-0x%x"
+ if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+ pr_warning("%s: chip revisions between 0x%x-0x%x "
"will not be enabled.\n",
module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
return -ENODEV;
@@ -1262,6 +1274,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1424,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
-static int __netxen_nic_shutdown(struct pci_dev *pdev)
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
- struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
@@ -1438,53 +1455,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
nx_decr_dev_ref_cnt(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
-
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- if (netxen_nic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
-
- pci_disable_device(pdev);
-
- return 0;
-}
-static void netxen_nic_shutdown(struct pci_dev *pdev)
-{
- if (__netxen_nic_shutdown(pdev))
- return;
-}
-#ifdef CONFIG_PM
-static int
-netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int retval;
-
- retval = __netxen_nic_shutdown(pdev);
- if (retval)
- return retval;
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
}
-static int
-netxen_nic_resume(struct pci_dev *pdev)
+static int netxen_nic_attach_func(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
err = pci_enable_device(pdev);
if (err)
return err;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
@@ -1503,11 +1489,10 @@ netxen_nic_resume(struct pci_dev *pdev)
if (err)
goto err_out_detach;
- netif_device_attach(netdev);
-
netxen_config_indev_addr(netdev, NETDEV_UP);
}
+ netif_device_attach(netdev);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
return 0;
@@ -1517,6 +1502,85 @@ err_out:
nx_decr_dev_ref_cnt(adapter);
return err;
}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
#endif
static int netxen_nic_open(struct net_device *netdev)
@@ -1898,12 +1962,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
linkup = (val == XG_LINK_UP_P3);
} else {
val = NXRD32(adapter, CRB_XG_STATE);
- if (adapter->ahw.port_type == NETXEN_NIC_GBE)
- linkup = (val >> port) & 1;
- else {
- val = (val >> port*8) & 0xff;
- linkup = (val == XG_LINK_UP);
- }
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
}
netxen_advert_link_change(adapter, linkup);
@@ -1945,7 +2005,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
clear_bit(__NX_RESETTING, &adapter->state);
-
+ return;
} else {
clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
@@ -2108,20 +2168,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
return count;
}
-static void
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+static int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
+ int ret = -EINVAL;
if (netxen_api_lock(adapter))
- return;
+ return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state != NX_DEV_INITALIZING)
+ if (state == NX_DEV_NEED_RESET)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ ret = 0;
+ }
netxen_api_unlock(adapter);
+
+ return ret;
}
static int
@@ -2244,7 +2333,9 @@ netxen_detach_work(struct work_struct *work)
netxen_nic_down(adapter, netdev);
+ rtnl_lock();
netxen_nic_detach(adapter);
+ rtnl_unlock();
status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
@@ -2273,17 +2364,29 @@ netxen_check_health(struct netxen_adapter *adapter)
u32 state, heartbit;
struct net_device *netdev = adapter->netdev;
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
if (netxen_nic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset) {
- nx_dev_request_reset(adapter);
+ if (nx_dev_request_reset(adapter))
+ return 0;
goto detach;
}
- state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state == NX_DEV_NEED_RESET)
- goto detach;
+ /* NX_DEV_NEED_RESET can be set in two cases:
+ * 1. Tx timeout  2. Fw hang
+ * A request to destroy the context is needed only for a Tx timeout,
+ * not for a Fw hang.
+ */
+ if (state == NX_DEV_NEED_RESET) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
@@ -2292,12 +2395,17 @@ netxen_check_health(struct netxen_adapter *adapter)
if (heartbit != adapter->heartbit) {
adapter->heartbit = heartbit;
adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
return 0;
}
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
clear_bit(__NX_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2500,7 +2608,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2727,6 +2835,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
+static struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
@@ -2736,7 +2850,8 @@ static struct pci_driver netxen_driver = {
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
#endif
- .shutdown = netxen_nic_shutdown
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
};
static int __init netxen_init_module(void)
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 6a87d810e59d..3892330f244a 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -51,7 +51,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -651,7 +650,8 @@ static void ni5010_set_multicast_list(struct net_device *dev)
PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
- if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
+ if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
+ !netdev_mc_empty(dev)) {
outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
} else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b42f5e522f90..f7a8f707361e 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -109,7 +109,6 @@ static int fifo = 0x8; /* don't change */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -596,8 +595,8 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,9 +723,9 @@ static int init586(struct net_device *dev)
writew(0xffff, &mc_cmd->cmd_link);
writew(num_addrs * 6, &mc_cmd->mc_cnt);
- for (i = 0; i < num_addrs; i++, dmi = dmi->next)
- memcpy_toio(mc_cmd->mc_list[i],
- dmi->dmi_addr, 6);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index ae19aafd2c7e..9225c76cac40 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -849,7 +849,7 @@ static int ni65_lance_reinit(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
ni65_init_lance(p,dev->dev_addr,0xff,0x0);
else
ni65_init_lance(p,dev->dev_addr,0x00,0x00);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ce58c4c7dd3..d5cd16bfc907 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -23,6 +25,7 @@
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/io.h>
@@ -33,7 +36,6 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Nov 14, 2008"
@@ -58,7 +60,7 @@ static void writeq(u64 val, void __iomem *reg)
}
#endif
-static struct pci_device_id niu_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
{}
};
@@ -89,21 +91,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
-#define niudbg(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f, ## a); \
-} while (0)
-
-#define niuinfo(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f, ## a); \
-} while (0)
-
-#define niuwarn(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f, ## a); \
-} while (0)
-
#define niu_lock_parent(np, flags) \
spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
@@ -135,10 +122,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
nw64_mac(reg, bits);
err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_mac(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_mac(reg));
return err;
}
@@ -175,10 +161,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_ipp(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_ipp(reg));
return err;
}
@@ -216,10 +201,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
nw64(reg, bits);
err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64(reg));
return err;
}
@@ -475,9 +459,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -486,9 +469,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -531,8 +513,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -569,9 +551,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -580,9 +561,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -639,9 +619,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- pr_info(PFX "NIU Port %u signal bits [%08x] are not "
- "[%08x] for 10G...trying 1G\n",
- np->port, (int) (sig & mask), (int) val);
+ pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+ np->port, (int)(sig & mask), (int)val);
/* 10G failed, try initializing at 1G */
err = serdes_init_niu_1g_serdes(np);
@@ -649,8 +628,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES "
- "Link Failed \n", np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -764,9 +743,8 @@ static int esr_reset(struct niu *np)
if (err)
return err;
if (reset != 0) {
- dev_err(np->device, PFX "Port %u ESR_RESET "
- "did not clear [%08x]\n",
- np->port, reset);
+ netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+ np->port, reset);
return -ENODEV;
}
@@ -890,8 +868,8 @@ static int serdes_init_10g(struct niu *np)
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
return 0;
}
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
@@ -1039,8 +1017,8 @@ static int serdes_init_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -1332,8 +1310,8 @@ static int bcm8704_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u PHY will not reset "
- "(bmcr=%04x)\n", np->port, (err & 0xffff));
+ netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+ np->port, (err & 0xffff));
return -ENODEV;
}
return 0;
@@ -1515,21 +1493,18 @@ static int xcvr_diag_bcm870x(struct niu *np)
MII_STAT1000);
if (err < 0)
return err;
- pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
if (err < 0)
return err;
- pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
- np->port, err);
+ pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
MII_NWAYTEST);
if (err < 0)
return err;
- pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif
/* XXX dig this out it might not be so useful XXX */
@@ -1555,11 +1530,11 @@ static int xcvr_diag_bcm870x(struct niu *np)
if (analog_stat0 != 0x03fc) {
if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
- pr_info(PFX "Port %u cable not connected "
- "or bad cable.\n", np->port);
+ pr_info("Port %u cable not connected or bad cable\n",
+ np->port);
} else if (analog_stat0 == 0x639c) {
- pr_info(PFX "Port %u optical module is bad "
- "or missing.\n", np->port);
+ pr_info("Port %u optical module is bad or missing\n",
+ np->port);
}
}
@@ -1699,8 +1674,8 @@ static int mii_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u MII would not reset, "
- "bmcr[%04x]\n", np->port, err);
+ netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+ np->port, err);
return -ENODEV;
}
@@ -1895,7 +1870,7 @@ static int mii_init_common(struct niu *np)
return err;
bmsr = err;
- pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
np->port, bmcr, bmsr);
#endif
@@ -1948,16 +1923,12 @@ static int niu_link_status_common(struct niu *np, int link_up)
unsigned long flags;
if (!netif_carrier_ok(dev) && link_up) {
- niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
- dev->name,
- (lp->active_speed == SPEED_10000 ?
- "10Gb/sec" :
- (lp->active_speed == SPEED_1000 ?
- "1Gb/sec" :
- (lp->active_speed == SPEED_100 ?
- "100Mbit/sec" : "10Mbit/sec"))),
- (lp->active_duplex == DUPLEX_FULL ?
- "full" : "half"));
+ netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
+ lp->active_speed == SPEED_10000 ? "10Gb/sec" :
+ lp->active_speed == SPEED_1000 ? "1Gb/sec" :
+ lp->active_speed == SPEED_100 ? "100Mbit/sec" :
+ "10Mbit/sec",
+ lp->active_duplex == DUPLEX_FULL ? "full" : "half");
spin_lock_irqsave(&np->lock, flags);
niu_init_xif(np);
@@ -1966,7 +1937,7 @@ static int niu_link_status_common(struct niu *np, int link_up)
netif_carrier_on(dev);
} else if (netif_carrier_ok(dev) && !link_up) {
- niuwarn(LINK, "%s: Link is down\n", dev->name);
+ netif_warn(np, link, dev, "Link is down\n");
spin_lock_irqsave(&np->lock, flags);
niu_handle_led(np, 0);
spin_unlock_irqrestore(&np->lock, flags);
@@ -2232,8 +2203,8 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
} else {
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
*link_up_p = 0;
- niuwarn(LINK, "%s: Hotplug PHY Removed\n",
- np->dev->name);
+ netif_warn(np, link, np->dev,
+ "Hotplug PHY Removed\n");
}
}
out:
@@ -2531,8 +2502,8 @@ static int serdes_init_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
- np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -2844,7 +2815,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit)
break;
udelay(1);
}
- if (limit < 0)
+ if (limit <= 0)
return -ENODEV;
return 0;
@@ -3234,23 +3205,22 @@ static int fflp_early_init(struct niu *np)
parent = np->parent;
err = 0;
if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
- niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
- np->port);
if (np->parent->plat_type != PLAT_TYPE_NIU) {
fflp_reset(np);
fflp_set_timings(np);
err = fflp_disable_all_partitions(np);
if (err) {
- niudbg(PROBE, "fflp_disable_all_partitions "
- "failed, err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_disable_all_partitions failed, err=%d\n",
+ err);
goto out;
}
}
err = tcam_early_init(np);
if (err) {
- niudbg(PROBE, "tcam_early_init failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_early_init failed, err=%d\n", err);
goto out;
}
fflp_llcsnap_enable(np, 1);
@@ -3260,22 +3230,22 @@ static int fflp_early_init(struct niu *np)
err = tcam_flush_all(np);
if (err) {
- niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_flush_all failed, err=%d\n", err);
goto out;
}
if (np->parent->plat_type != PLAT_TYPE_NIU) {
err = fflp_hash_clear(np);
if (err) {
- niudbg(PROBE, "fflp_hash_clear failed, "
- "err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_hash_clear failed, err=%d\n",
+ err);
goto out;
}
}
vlan_tbl_clear(np);
- niudbg(PROBE, "fflp_early_init: Success\n");
parent->flags |= PARENT_FLGS_CLS_HWINIT;
}
out:
@@ -3665,8 +3635,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
cons = rp->cons;
- niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
- np->dev->name, pkt_cnt, cons);
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
@@ -3714,11 +3684,12 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_errors += misc & RXMISC_COUNT;
if (unlikely(misc & RXMISC_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "RXMISC discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+ rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
- np->dev->name, rx_channel, misc, misc-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: MISC drop=%u over=%u\n",
+ rx_channel, misc, misc-limit);
}
/* WRED (Weighted Random Early Discard) by hardware */
@@ -3728,11 +3699,11 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
if (unlikely(wred & RED_DIS_CNT_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "WRED discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
- np->dev->name, rx_channel, wred, wred-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: WRED drop=%u over=%u\n",
+ rx_channel, wred, wred-limit);
}
}
@@ -3753,8 +3724,9 @@ static int niu_rx_work(struct napi_struct *napi, struct niu *np,
mbox->rx_dma_ctl_stat = 0;
mbox->rcrstat_a = 0;
- niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
- np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+ netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+ "%s(chan[%d]), stat[%llx] qlen=%d\n",
+ __func__, rp->rx_channel, (unsigned long long)stat, qlen);
rcr_done = work_done = 0;
qlen = min(qlen, budget);
@@ -3791,8 +3763,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
u32 rx_vec = (v0 & 0xffffffff);
int i, work_done = 0;
- niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
- np->dev->name, (unsigned long long) v0);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -3837,39 +3809,38 @@ static int niu_poll(struct napi_struct *napi, int budget)
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
u64 stat)
{
- dev_err(np->device, PFX "%s: RX channel %u errors ( ",
- np->dev->name, rp->rx_channel);
+ netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
- printk("RBR_TMOUT ");
+ pr_cont("RBR_TMOUT ");
if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
- printk("RSP_CNT ");
+ pr_cont("RSP_CNT ");
if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
- printk("BYTE_EN_BUS ");
+ pr_cont("BYTE_EN_BUS ");
if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
- printk("RSP_DAT ");
+ pr_cont("RSP_DAT ");
if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
- printk("RCR_ACK ");
+ pr_cont("RCR_ACK ");
if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
- printk("RCR_SHA_PAR ");
+ pr_cont("RCR_SHA_PAR ");
if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
- printk("RBR_PRE_PAR ");
+ pr_cont("RBR_PRE_PAR ");
if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
- printk("CONFIG ");
+ pr_cont("CONFIG ");
if (stat & RX_DMA_CTL_STAT_RCRINCON)
- printk("RCRINCON ");
+ pr_cont("RCRINCON ");
if (stat & RX_DMA_CTL_STAT_RCRFULL)
- printk("RCRFULL ");
+ pr_cont("RCRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRFULL)
- printk("RBRFULL ");
+ pr_cont("RBRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
- printk("RBRLOGPAGE ");
+ pr_cont("RBRLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
- printk("CFIGLOGPAGE ");
+ pr_cont("CFIGLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
- printk("DC_FIDO ");
+ pr_cont("DC_FIDO ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
@@ -3883,9 +3854,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
err = -EINVAL;
if (err) {
- dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
- np->dev->name, rp->rx_channel,
- (unsigned long long) stat);
+ netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+ rp->rx_channel,
+ (unsigned long long) stat);
niu_log_rxchan_errors(np, rp, stat);
}
@@ -3899,27 +3870,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
u64 cs)
{
- dev_err(np->device, PFX "%s: TX channel %u errors ( ",
- np->dev->name, rp->tx_channel);
+ netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
if (cs & TX_CS_MBOX_ERR)
- printk("MBOX ");
+ pr_cont("MBOX ");
if (cs & TX_CS_PKT_SIZE_ERR)
- printk("PKT_SIZE ");
+ pr_cont("PKT_SIZE ");
if (cs & TX_CS_TX_RING_OFLOW)
- printk("TX_RING_OFLOW ");
+ pr_cont("TX_RING_OFLOW ");
if (cs & TX_CS_PREF_BUF_PAR_ERR)
- printk("PREF_BUF_PAR ");
+ pr_cont("PREF_BUF_PAR ");
if (cs & TX_CS_NACK_PREF)
- printk("NACK_PREF ");
+ pr_cont("NACK_PREF ");
if (cs & TX_CS_NACK_PKT_RD)
- printk("NACK_PKT_RD ");
+ pr_cont("NACK_PKT_RD ");
if (cs & TX_CS_CONF_PART_ERR)
- printk("CONF_PART ");
+ pr_cont("CONF_PART ");
if (cs & TX_CS_PKT_PRT_ERR)
- printk("PKT_PTR ");
+ pr_cont("PKT_PTR ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
@@ -3930,12 +3900,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
- dev_err(np->device, PFX "%s: TX channel %u error, "
- "cs[%llx] logh[%llx] logl[%llx]\n",
- np->dev->name, rp->tx_channel,
- (unsigned long long) cs,
- (unsigned long long) logh,
- (unsigned long long) logl);
+ netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+ rp->tx_channel,
+ (unsigned long long)cs,
+ (unsigned long long)logh,
+ (unsigned long long)logl);
niu_log_txchan_errors(np, rp, cs);
@@ -3954,9 +3923,8 @@ static int niu_mif_interrupt(struct niu *np)
phy_mdint = 1;
}
- dev_err(np->device, PFX "%s: MIF interrupt, "
- "stat[%llx] phy_mdint(%d)\n",
- np->dev->name, (unsigned long long) mif_status, phy_mdint);
+ netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+ (unsigned long long)mif_status, phy_mdint);
return -ENODEV;
}
@@ -4081,41 +4049,40 @@ static int niu_mac_interrupt(struct niu *np)
static void niu_log_device_error(struct niu *np, u64 stat)
{
- dev_err(np->device, PFX "%s: Core device errors ( ",
- np->dev->name);
+ netdev_err(np->dev, "Core device errors ( ");
if (stat & SYS_ERR_MASK_META2)
- printk("META2 ");
+ pr_cont("META2 ");
if (stat & SYS_ERR_MASK_META1)
- printk("META1 ");
+ pr_cont("META1 ");
if (stat & SYS_ERR_MASK_PEU)
- printk("PEU ");
+ pr_cont("PEU ");
if (stat & SYS_ERR_MASK_TXC)
- printk("TXC ");
+ pr_cont("TXC ");
if (stat & SYS_ERR_MASK_RDMC)
- printk("RDMC ");
+ pr_cont("RDMC ");
if (stat & SYS_ERR_MASK_TDMC)
- printk("TDMC ");
+ pr_cont("TDMC ");
if (stat & SYS_ERR_MASK_ZCP)
- printk("ZCP ");
+ pr_cont("ZCP ");
if (stat & SYS_ERR_MASK_FFLP)
- printk("FFLP ");
+ pr_cont("FFLP ");
if (stat & SYS_ERR_MASK_IPP)
- printk("IPP ");
+ pr_cont("IPP ");
if (stat & SYS_ERR_MASK_MAC)
- printk("MAC ");
+ pr_cont("MAC ");
if (stat & SYS_ERR_MASK_SMX)
- printk("SMX ");
+ pr_cont("SMX ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_device_error(struct niu *np)
{
u64 stat = nr64(SYS_ERR_STAT);
- dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netdev_err(np->dev, "Core device error, stat[%llx]\n",
+ (unsigned long long)stat);
niu_log_device_error(np, stat);
@@ -4197,8 +4164,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
RX_DMA_CTL_STAT_RCRTO);
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
- niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
@@ -4206,8 +4173,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
{
rp->tx_cs = nr64(TX_CS(rp->tx_channel));
- niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
- np->dev->name, (unsigned long long) rp->tx_cs);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
}
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
@@ -4265,8 +4232,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
u64 v0, v1, v2;
if (netif_msg_intr(np))
- printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
- lp, ldg);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+ __func__, lp, ldg);
spin_lock_irqsave(&np->lock, flags);
@@ -4275,7 +4242,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
v2 = nr64(LDSV2(ldg));
if (netif_msg_intr(np))
- printk("v0[%llx] v1[%llx] v2[%llx]\n",
+ pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
(unsigned long long) v0,
(unsigned long long) v1,
(unsigned long long) v2);
@@ -4400,8 +4367,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4411,8 +4378,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rcr)
return -ENOMEM;
if ((unsigned long)rp->rcr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+ rp->rcr);
return -EINVAL;
}
rp->rcr_table_size = MAX_RCR_RING_SIZE;
@@ -4424,8 +4391,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rbr)
return -ENOMEM;
if ((unsigned long)rp->rbr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+ rp->rbr);
return -EINVAL;
}
rp->rbr_table_size = MAX_RBR_RING_SIZE;
@@ -4458,8 +4425,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4469,8 +4436,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->descr)
return -ENOMEM;
if ((unsigned long)rp->descr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA descr table %p\n", np->dev->name, rp->descr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+ rp->descr);
return -EINVAL;
}
@@ -4726,10 +4693,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
TX_RNG_CFIG_STADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "DMA addr (%llx) is not aligned.\n",
- np->dev->name, channel,
- (unsigned long long) rp->descr_dma);
+ netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+ channel, (unsigned long long)rp->descr_dma);
return -EINVAL;
}
@@ -4746,10 +4711,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "MBOX addr (%llx) is has illegal bits.\n",
- np->dev->name, channel,
- (unsigned long long) rp->mbox_dma);
+ netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+ channel, (unsigned long long)rp->mbox_dma);
return -EINVAL;
}
nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
@@ -5146,9 +5109,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5160,9 +5122,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5527,8 +5488,7 @@ static int niu_reset_tx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
- "BTXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BTXMAC_SW_RST));
return -ENODEV;
@@ -5629,12 +5589,11 @@ static int niu_reset_rx_xmac(struct niu *np)
while (--limit >= 0) {
if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
XRXMAC_SW_RST_SOFT_RST)))
- break;
+ break;
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
- "XRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(XRXMAC_SW_RST));
return -ENODEV;
@@ -5655,8 +5614,7 @@ static int niu_reset_rx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
- "BRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BRXMAC_SW_RST));
return -ENODEV;
@@ -5960,11 +5918,9 @@ static void niu_disable_ipp(struct niu *np)
}
if (limit < 0 &&
(rd != 0 && wr != 1)) {
- dev_err(np->device, PFX "%s: IPP would not quiesce, "
- "rd_ptr[%llx] wr_ptr[%llx]\n",
- np->dev->name,
- (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
- (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+ (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
}
val = nr64_ipp(IPP_CFIG);
@@ -5981,12 +5937,12 @@ static int niu_init_hw(struct niu *np)
{
int i, err;
- niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
niu_txc_enable_port(np, 1);
niu_txc_port_dma_enable(np, 1);
niu_txc_set_imask(np, 0);
- niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -5995,27 +5951,27 @@ static int niu_init_hw(struct niu *np)
return err;
}
- niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
err = niu_init_rx_channels(np);
if (err)
goto out_uninit_tx_channels;
- niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
err = niu_init_classifier_hw(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
err = niu_init_zcp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
err = niu_init_ipp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
err = niu_init_mac(np);
if (err)
goto out_uninit_ipp;
@@ -6023,16 +5979,16 @@ static int niu_init_hw(struct niu *np)
return 0;
out_uninit_ipp:
- niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
niu_disable_ipp(np);
out_uninit_rx_channels:
- niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
niu_stop_rx_channels(np);
niu_reset_rx_channels(np);
out_uninit_tx_channels:
- niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
niu_stop_tx_channels(np);
niu_reset_tx_channels(np);
@@ -6041,25 +5997,25 @@ out_uninit_tx_channels:
static void niu_stop_hw(struct niu *np)
{
- niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
niu_enable_interrupts(np, 0);
- niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
niu_enable_rx_mac(np, 0);
- niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
niu_disable_ipp(np);
- niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
niu_stop_tx_channels(np);
- niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
niu_stop_rx_channels(np);
- niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
niu_reset_tx_channels(np);
- niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
niu_reset_rx_channels(np);
}
@@ -6369,10 +6325,10 @@ static void niu_set_rx_mode(struct net_device *dev)
np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
if (dev->flags & IFF_PROMISC)
np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
np->flags |= NIU_FLAGS_MCAST;
- alt_cnt = dev->uc.count;
+ alt_cnt = netdev_uc_count(dev);
if (alt_cnt > niu_num_alt_addr(np)) {
alt_cnt = 0;
np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,17 +6337,15 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "adding alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d adding alt mac %d\n",
+ err, index);
err = niu_enable_alt_mac(np, index, 1);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "enabling alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d enabling alt mac %d\n",
+ err, index);
index++;
}
@@ -6404,16 +6358,15 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = alt_start; i < niu_num_alt_addr(np); i++) {
err = niu_enable_alt_mac(np, i, 0);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "disabling alt mac %d\n",
- dev->name, err, i);
+ netdev_warn(dev, "Error %d disabling alt mac %d\n",
+ err, i);
}
}
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
- for (addr = dev->mc_list; addr; addr = addr->next) {
+ } else if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(addr, dev) {
u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
crc >>= 24;
@@ -6570,7 +6523,7 @@ static void niu_tx_timeout(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
+ dev_err(np->device, "%s: Transmit timed out, resetting\n",
dev->name);
schedule_work(&np->reset_task);
@@ -6672,8 +6625,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_tx_stop_queue(txq);
- dev_err(np->device, PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", dev->name);
+ dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
rp->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -7237,8 +7189,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
tp = &parent->tcam[idx];
if (!tp->valid) {
- pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
- parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+ parent->index, (u16)nfc->fs.location, idx);
return -EINVAL;
}
@@ -7248,8 +7200,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
ret = niu_class_to_ethflow(class, &fsp->flow_type);
if (ret < 0) {
- pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+ parent->index);
ret = -EINVAL;
goto out;
}
@@ -7332,9 +7284,8 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
if (n_entries != cnt) {
/* print warning, this should not happen */
- pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
- "n_entries[%d] != cnt[%d]!!!\n\n",
- np->parent->index, np->dev->name, n_entries, cnt);
+ netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
+ np->parent->index, __func__, n_entries, cnt);
}
return 0;
@@ -7561,9 +7512,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
}
}
if (!add_usr_cls) {
- pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
- "Could not find/insert class for pid %d\n",
- parent->index, np->dev->name, uspec->proto);
+ netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+ parent->index, __func__, uspec->proto);
ret = -EINVAL;
goto out;
}
@@ -7596,9 +7546,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
case AH_V6_FLOW:
case ESP_V6_FLOW:
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "flow %d for IPv6 not implemented\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
@@ -7607,17 +7556,15 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
class);
} else {
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "usr flow for IPv6 not implemented\n\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
+ parent->index, __func__);
ret = -EINVAL;
goto out;
}
break;
default:
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Unknown flow type %d\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
}
@@ -7627,10 +7574,9 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
tp->assoc_data = TCAM_ASSOCDATA_DISC;
} else {
if (fsp->ring_cookie >= np->num_rx_rings) {
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Invalid RX ring %lld\n\n",
- parent->index, np->dev->name,
- (long long) fsp->ring_cookie);
+ netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+ parent->index, __func__,
+ (long long)fsp->ring_cookie);
ret = -EINVAL;
goto out;
}
@@ -7699,10 +7645,9 @@ static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
}
}
if (i == NIU_L3_PROG_CLS) {
- pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
- "Usr class 0x%llx not found \n",
- parent->index, np->dev->name,
- (unsigned long long) class);
+ netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+ parent->index, __func__,
+ (unsigned long long)class);
ret = -EINVAL;
goto out;
}
@@ -8001,9 +7946,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, PFX "Port %u, mis-matched "
- "LDG assignment "
- "for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
@@ -8056,7 +7999,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8071,7 +8014,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8152,8 +8095,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
s += i + 5;
sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
- niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
- vpd->fcode_major, vpd->fcode_minor);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+ vpd->fcode_major, vpd->fcode_minor);
if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
(vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
@@ -8173,8 +8117,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
#define FOUND_MASK_PHY 0x00000020
#define FOUND_MASK_ALL 0x0000003f
- niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
- start, end);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: start[%x] end[%x]\n", start, end);
while (start < end) {
int len, err, instance, type, prop_len;
char namebuf[64];
@@ -8228,8 +8172,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
}
if (max_len && prop_len > max_len) {
- dev_err(np->device, PFX "Property '%s' length (%d) is "
- "too long.\n", namebuf, prop_len);
+ dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
return -EINVAL;
}
@@ -8237,8 +8180,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
u32 off = start + 5 + err;
int i;
- niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
- "len[%d]\n", namebuf, prop_len);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
for (i = 0; i < prop_len; i++)
*prop_buf++ = niu_pci_eeprom_read(np, off + i);
}
@@ -8402,8 +8346,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
- dev_err(np->device, PFX "VPD MAC invalid, "
- "falling back to SPROM.\n");
+ dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
@@ -8420,14 +8363,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
}
if (np->flags & NIU_FLAGS_10G)
- np->mac_xcvr = MAC_XCVR_XPCS;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "Illegal phy string [%s].\n",
+ dev_err(np->device, "Illegal phy string [%s]\n",
np->vpd.phy_type);
- dev_err(np->device, PFX "Falling back to SPROM.\n");
+ dev_err(np->device, "Falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
}
@@ -8455,7 +8398,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->eeprom_len = len;
- niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Image size %llu\n", (unsigned long long)val);
sum = 0;
for (i = 0; i < len; i++) {
@@ -8465,10 +8409,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
sum += (val >> 16) & 0xff;
sum += (val >> 24) & 0xff;
}
- niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Checksum %x\n", (int)(sum & 0xff));
if ((sum & 0xff) != 0xab) {
- dev_err(np->device, PFX "Bad SPROM checksum "
- "(%x, should be 0xab)\n", (int) (sum & 0xff));
+ dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
return -EINVAL;
}
@@ -8491,11 +8435,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
ESPC_PHY_TYPE_PORT3_SHIFT;
break;
default:
- dev_err(np->device, PFX "Bogus port number %u\n",
+ dev_err(np->device, "Bogus port number %u\n",
np->port);
return -EINVAL;
}
- niudbg(PROBE, "SPROM: PHY type %x\n", val8);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: PHY type %x\n", val8);
switch (val8) {
case ESPC_PHY_TYPE_1G_COPPER:
@@ -8527,30 +8472,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
+ dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
return -EINVAL;
}
val = nr64(ESPC_MAC_ADDR0);
- niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
dev->perm_addr[0] = (val >> 0) & 0xff;
dev->perm_addr[1] = (val >> 8) & 0xff;
dev->perm_addr[2] = (val >> 16) & 0xff;
dev->perm_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
- niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
dev->perm_addr[4] = (val >> 0) & 0xff;
dev->perm_addr[5] = (val >> 8) & 0xff;
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- dev_err(np->device, PFX "SPROM MAC address invalid\n");
- dev_err(np->device, PFX "[ \n");
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+ dev->perm_addr);
return -EINVAL;
}
@@ -8562,8 +8504,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
val = nr64(ESPC_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 8 * 4)
return -EINVAL;
@@ -8578,8 +8520,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.model[val] = '\0';
val = nr64(ESPC_BD_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 4 * 4)
return -EINVAL;
@@ -8595,8 +8537,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.mac_num =
nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
- niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
- np->vpd.mac_num);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
return 0;
}
@@ -8629,8 +8571,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
}
}
- niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
- np->port, parent->num_ports);
if (np->port >= parent->num_ports)
return -ENODEV;
@@ -8659,14 +8599,12 @@ static int __devinit phy_record(struct niu_parent *parent,
pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
parent->index, id,
- (type == PHY_TYPE_PMA_PMD ?
- "PMA/PMD" :
- (type == PHY_TYPE_PCS ?
- "PCS" : "MII")),
+ type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+ type == PHY_TYPE_PCS ? "PCS" : "MII",
phy_port);
if (p->cur[type] >= NIU_MAX_PORTS) {
- printk(KERN_ERR PFX "Too many PHY ports.\n");
+ pr_err("Too many PHY ports\n");
return -EINVAL;
}
idx = p->cur[type];
@@ -8727,8 +8665,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
parent->rxchan_per_port[i] = (16 / num_ports);
parent->txchan_per_port[i] = (16 / num_ports);
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8771,8 +8708,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
parent->rxchan_per_port[i] = rx_chans_per_1g;
parent->txchan_per_port[i] = tx_chans_per_1g;
}
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8781,23 +8717,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
}
if (tot_rx > NIU_NUM_RXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
parent->index, tot_rx);
for (i = 0; i < num_ports; i++)
parent->rxchan_per_port[i] = 1;
}
if (tot_tx > NIU_NUM_TXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
parent->index, tot_tx);
for (i = 0; i < num_ports; i++)
parent->txchan_per_port[i] = 1;
}
if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
- printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
- "RX[%d] TX[%d]\n",
- parent->index, tot_rx, tot_tx);
+ pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
}
}
@@ -8825,18 +8758,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
struct rdc_table *rt = &tp->tables[grp];
int slot;
- pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
+ pr_info("niu%d: Port %d RDC tbl(%d) [ ",
parent->index, i, tp->first_table_num + grp);
for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
rt->rxdma_channel[slot] =
rdc_channel_base + this_channel_offset;
- printk("%d ", rt->rxdma_channel[slot]);
+ pr_cont("%d ", rt->rxdma_channel[slot]);
if (++this_channel_offset == num_channels)
this_channel_offset = 0;
}
- printk("]\n");
+ pr_cont("]\n");
}
parent->rdc_default[i] = rdc_channel_base;
@@ -8996,8 +8929,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
break;
default:
- printk(KERN_ERR PFX "Unsupported port config "
- "10G[%d] 1G[%d]\n",
+ pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
num_10g, num_1g);
return -EINVAL;
}
@@ -9015,8 +8947,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
return 0;
unknown_vg_1g_port:
- printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
- lowest_1g);
+ pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
return -EINVAL;
}
@@ -9025,9 +8956,6 @@ static int __devinit niu_probe_ports(struct niu *np)
struct niu_parent *parent = np->parent;
int err, i;
- niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
- parent->port_phy);
-
if (parent->port_phy == PORT_PHY_UNKNOWN) {
err = walk_phys(np, parent);
if (err)
@@ -9048,9 +8976,6 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
{
struct niu_classifier *cp = &np->clas;
- niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
- np->parent->tcam_num_entries);
-
cp->tcam_top = (u16) np->port;
cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
@@ -9116,8 +9041,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Port %u is invalid, cannot "
- "compute MAC block offset.\n", np->port);
+ dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
return -EINVAL;
}
@@ -9327,9 +9251,8 @@ static int __devinit niu_get_of_props(struct niu *np)
phy_type = of_get_property(dp, "phy-type", &prop_len);
if (!phy_type) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "phy-type property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks phy-type property\n",
+ dp->full_name);
return -EINVAL;
}
@@ -9339,34 +9262,26 @@ static int __devinit niu_get_of_props(struct niu *np)
strcpy(np->vpd.phy_type, phy_type);
if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
- dp->full_name, np->vpd.phy_type);
+ netdev_err(dev, "%s: Illegal phy string [%s]\n",
+ dp->full_name, np->vpd.phy_type);
return -EINVAL;
}
mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
if (!mac_addr) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "local-mac-address property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+ dp->full_name);
return -EINVAL;
}
if (prop_len != dev->addr_len) {
- dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
- "is wrong.\n",
- dp->full_name, prop_len);
+ netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+ dp->full_name, prop_len);
}
memcpy(dev->perm_addr, mac_addr, dev->addr_len);
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- int i;
-
- dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
- dp->full_name);
- dev_err(np->device, PFX "%s: [ \n",
- dp->full_name);
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ netdev_err(dev, "%s: OF MAC address is invalid\n",
+ dp->full_name);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
return -EINVAL;
}
@@ -9414,8 +9329,8 @@ static int __devinit niu_get_invariants(struct niu *np)
nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
offset = niu_pci_vpd_offset(np);
- niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
- offset);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() VPD offset [%08x]\n", __func__, offset);
if (offset)
niu_pci_vpd_fetch(np, offset);
nw64(ESPC_PIO_EN, 0);
@@ -9575,8 +9490,6 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
struct niu_parent *p;
int i;
- niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
-
plat_dev = platform_device_register_simple("niu", niu_parent_index,
NULL, 0);
if (IS_ERR(plat_dev))
@@ -9641,9 +9554,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np,
struct niu_parent *p, *tmp;
int port = np->port;
- niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
- ptype, port);
-
mutex_lock(&niu_parent_lock);
p = NULL;
list_for_each_entry(tmp, &niu_parent_list, list) {
@@ -9681,7 +9591,8 @@ static void niu_put_parent(struct niu *np)
BUG_ON(!p || p->ports[port] != np);
- niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() port[%u]\n", __func__, port);
sprintf(port_name, "port%d", port);
@@ -9772,7 +9683,7 @@ static struct net_device * __devinit niu_alloc_and_init(
dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
if (!dev) {
- dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
return NULL;
}
@@ -9858,30 +9769,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
- "base addresses, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos <= 0) {
- dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
@@ -9920,17 +9827,14 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
err = pci_set_consistent_dma_mask(pdev, dma_mask);
if (err) {
- dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
- "DMA for consistent allocations, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
goto err_out_release_parent;
}
}
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_out_release_parent;
}
}
@@ -9939,8 +9843,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
- dev_err(&pdev->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -9955,15 +9858,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&pdev->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
@@ -10157,7 +10058,7 @@ static int __devinit niu_of_probe(struct of_device *op,
reg = of_get_property(op->node, "reg", NULL);
if (!reg) {
- dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
+ dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
op->node->full_name);
return -ENODEV;
}
@@ -10186,8 +10087,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[1]),
"niu regs");
if (!np->regs) {
- dev_err(&op->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -10196,8 +10096,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[2]),
"niu vregs-1");
if (!np->vir_regs_1) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10206,8 +10105,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[3]),
"niu vregs-2");
if (!np->vir_regs_2) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10217,15 +10115,13 @@ static int __devinit niu_of_probe(struct of_device *op,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&op->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&op->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d41536..e88e97cd1b10 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -116,6 +116,7 @@
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -1719,7 +1720,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
@@ -2292,7 +2293,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-static struct pci_device_id ns83820_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
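
DEFINE_PCI_DEVICE_TABLE() is the kernel's wrapper for declaring a PCI ID table as const __devinitconst data, so the table can be kept read-only (and discarded when hotplug support is compiled out) instead of living in writable .data. A sketch of a table declared this way, reusing the ns83820 IDs from the hunk above (foo_pci_tbl is a placeholder name):

    #include <linux/module.h>
    #include <linux/pci.h>

    /* in this era expands to: const struct pci_device_id foo_pci_tbl[] __devinitconst */
    static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
            { PCI_DEVICE(PCI_VENDOR_ID_NS, 0x0022) },   /* ns83820 */
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, foo_pci_tbl);
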
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155a..8aadc8e2ddd7 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>
@@ -467,7 +468,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
int port = p->port;
- int i;
union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
unsigned long flags;
@@ -493,8 +493,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
if (netdev->flags & IFF_MULTICAST) {
- if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
- || netdev->mc_count > available_cam_entries)
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
multicast_mode = 2; /* 1 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
@@ -511,12 +511,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
}
if (multicast_mode == 0) {
- i = netdev->mc_count;
- list = netdev->mc_list;
- while (i--) {
+ netdev_for_each_mc_addr(list, netdev)
octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
- list = list->next;
- }
}
@@ -1119,11 +1115,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
if (p->port >= octeon_bootinfo->mac_addr_count)
dev_err(&pdev->dev,
- "Error %s: Using MAC outside of the assigned range: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ "Error %s: Using MAC outside of the assigned range: %pM\n",
+ netdev->name, netdev->dev_addr);
if (register_netdev(netdev))
goto err;
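
The hand-rolled six-%02x MAC dump is folded into the %pM printk extension, which takes a pointer to a six-byte address and renders it colon-separated, removing both the long format string and the risk of mismatched argument counts. A toy usage sketch (foo_show_mac() is illustrative):

    #include <linux/netdevice.h>

    static void foo_show_mac(struct net_device *netdev)
    {
            /* %pM consumes a pointer to 6 bytes and prints aa:bb:cc:dd:ee:ff */
            netdev_info(netdev, "MAC address %pM\n", netdev->dev_addr);
    }
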
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1e..370c147d08a3 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
@@ -1875,7 +1876,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402affd..36785853a149 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -11,7 +11,7 @@
-----<snip>-----
- Written 1997-2000 by Donald Becker.
+ Written 1997-2000 by Donald Becker.
This software may be used and distributed according to the
terms of the GNU General Public License (GPL), incorporated
herein by reference. Drivers based on or derived from this
@@ -85,6 +85,8 @@ IVc. Errata
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
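
The new pr_fmt() definition is what lets the PFX macro disappear from this file: every pr_info()/pr_err()/pr_debug() call picks the prefix up automatically, as long as the define appears before the first header that expands pr_*(). A standalone sketch of the idiom (the module and message are made up):

    /* must precede any include that may expand pr_*() */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/module.h>

    static int __init foo_init(void)
    {
            pr_info("loaded\n");    /* prints "foo: loaded" when built as foo.ko */
            return 0;
    }
    module_init(foo_init);
    MODULE_LICENSE("GPL");
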
@@ -96,16 +98,15 @@ IVc. Errata
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define NETDRV_VERSION "1.0.1"
#define MODNAME "netdrv"
#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
-#define PFX MODNAME ": "
static char version[] __devinitdata =
-KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
-" Support available from http://foo.com/bar/baz.html\n";
+ KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
+ " Support available from http://foo.com/bar/baz.html\n";
/* define to 1 to enable PIO instead of MMIO */
#undef USE_IO_OPS
@@ -119,19 +120,24 @@ KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
#ifdef NETDRV_DEBUG
/* note: prints function name for you */
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+#define DPRINTK(fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
-# define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##args); \
+} while (0)
#endif
#ifdef NETDRV_NDEBUG
-# define assert(expr) do {} while (0)
+#define assert(expr) do {} while (0)
#else
-# define assert(expr) \
- if(!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
+#define assert(expr) \
+ if (!(expr)) { \
+ printk("Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
#endif
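
The rewritten no-op DPRINTK() deserves a note: instead of expanding to nothing, the disabled branch keeps an `if (0) printk(...)` inside do { } while (0), so the arguments are still type-checked against the format string and variables used only in debug output do not trigger "unused" warnings, while the compiler discards the dead call. The same idiom, sketched outside this driver (FOO_DEBUG and foo_dbg are assumed names):

    #ifdef FOO_DEBUG
    #define foo_dbg(fmt, args...) \
            printk(KERN_DEBUG "%s: " fmt, __func__, ##args)
    #else
    #define foo_dbg(fmt, args...) \
    do { \
            if (0) \
                    printk(KERN_DEBUG fmt, ##args); \
    } while (0)
    #endif
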
@@ -148,10 +154,10 @@ static int multicast_filter_limit = 32;
/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
-#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
-#define RX_BUF_PAD 16
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD 16
#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
-#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
@@ -165,9 +171,11 @@ static int multicast_filter_limit = 32;
/* PCI Tuning Parameters
Threshold is bytes transferred to chip before transmission starts. */
-#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
-/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+/* The following settings are log_2(bytes)-4:
+ 0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
+*/
#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
@@ -175,8 +183,7 @@ static int multicast_filter_limit = 32;
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (6*HZ)
-
+#define TX_TIMEOUT (6 * HZ)
enum {
HAS_CHIP_XCVR = 0x020000,
@@ -186,7 +193,7 @@ enum {
#define NETDRV_MIN_IO_SIZE 0x80
#define RTL8139B_IO_SIZE 256
-#define NETDRV_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+#define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
typedef enum {
RTL8139 = 0,
@@ -211,7 +218,7 @@ static struct {
};
-static struct pci_device_id netdrv_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
@@ -220,7 +227,7 @@ static struct pci_device_id netdrv_pci_tbl[] = {
{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
{0,}
};
-MODULE_DEVICE_TABLE (pci, netdrv_pci_tbl);
+MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
/* The rest of these values should never change. */
@@ -270,7 +277,7 @@ enum NETDRV_registers {
enum ClearBitMasks {
MultiIntrClear = 0xF000,
ChipCmdClear = 0xE2,
- Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+ Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
};
enum ChipCmdBits {
@@ -329,7 +336,7 @@ enum tx_config_bits {
TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
TxClearAbt = (1 << 0), /* Clear abort (WO) */
- TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+ TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
};
@@ -481,41 +488,44 @@ struct netdrv_private {
chip_t chipset;
};
-MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
module_param(multicast_filter_limit, int, 0);
module_param(max_interrupt_work, int, 0);
module_param_array(media, int, NULL, 0);
-MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
-MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
-MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
-
-static int read_eeprom (void *ioaddr, int location, int addr_len);
-static int netdrv_open (struct net_device *dev);
-static int mdio_read (struct net_device *dev, int phy_id, int location);
-static void mdio_write (struct net_device *dev, int phy_id, int location,
- int val);
-static void netdrv_timer (unsigned long data);
-static void netdrv_tx_timeout (struct net_device *dev);
-static void netdrv_init_ring (struct net_device *dev);
-static int netdrv_start_xmit (struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t netdrv_interrupt (int irq, void *dev_instance);
-static int netdrv_close (struct net_device *dev);
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static void netdrv_set_rx_mode (struct net_device *dev);
-static void netdrv_hw_start (struct net_device *dev);
+MODULE_PARM_DESC(multicast_filter_limit,
+ MODNAME " maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(max_interrupt_work,
+ MODNAME " maximum events handled per interrupt");
+MODULE_PARM_DESC(media,
+ MODNAME " Bits 0-3: media type, bit 17: full duplex");
+
+static int read_eeprom(void *ioaddr, int location, int addr_len);
+static int netdrv_open(struct net_device *dev);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int val);
+static void netdrv_timer(unsigned long data);
+static void netdrv_tx_timeout(struct net_device *dev);
+static void netdrv_init_ring(struct net_device *dev);
+static int netdrv_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
+static int netdrv_close(struct net_device *dev);
+static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void netdrv_set_rx_mode(struct net_device *dev);
+static void netdrv_hw_start(struct net_device *dev);
#ifdef USE_IO_OPS
-#define NETDRV_R8(reg) inb (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R16(reg) inw (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
-#define NETDRV_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
+#define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
+#define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
+#define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
#define NETDRV_W8_F NETDRV_W8
#define NETDRV_W16_F NETDRV_W16
#define NETDRV_W32_F NETDRV_W32
@@ -528,25 +538,37 @@ static void netdrv_hw_start (struct net_device *dev);
#define readb(addr) inb((unsigned long)(addr))
#define readw(addr) inw((unsigned long)(addr))
#define readl(addr) inl((unsigned long)(addr))
-#define writeb(val,addr) outb((val),(unsigned long)(addr))
-#define writew(val,addr) outw((val),(unsigned long)(addr))
-#define writel(val,addr) outl((val),(unsigned long)(addr))
+#define writeb(val, addr) outb((val), (unsigned long)(addr))
+#define writew(val, addr) outw((val), (unsigned long)(addr))
+#define writel(val, addr) outl((val), (unsigned long)(addr))
#else
/* write MMIO register, with flush */
/* Flush avoids rtl8139 bug w/ posted MMIO writes */
-#define NETDRV_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
-#define NETDRV_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
-#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+#define NETDRV_W8_F(reg, val8) \
+do { \
+ writeb((val8), ioaddr + (reg)); \
+ readb(ioaddr + (reg)); \
+} while (0)
+#define NETDRV_W16_F(reg, val16) \
+do { \
+ writew((val16), ioaddr + (reg)); \
+ readw(ioaddr + (reg)); \
+} while (0)
+#define NETDRV_W32_F(reg, val32) \
+do { \
+ writel((val32), ioaddr + (reg)); \
+ readl(ioaddr + (reg)); \
+} while (0)
#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
-#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
-#define NETDRV_W16(reg, val16) writew ((val16), ioaddr + (reg))
-#define NETDRV_W32(reg, val32) writel ((val32), ioaddr + (reg))
+#define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
+#define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
+#define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
#else
@@ -558,9 +580,9 @@ static void netdrv_hw_start (struct net_device *dev);
#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
/* read MMIO register */
-#define NETDRV_R8(reg) readb (ioaddr + (reg))
-#define NETDRV_R16(reg) readw (ioaddr + (reg))
-#define NETDRV_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+#define NETDRV_R8(reg) readb(ioaddr + (reg))
+#define NETDRV_R16(reg) readw(ioaddr + (reg))
+#define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
#endif /* USE_IO_OPS */
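
The _F variants of the register write macros pair each writeX() with a read-back of the same register. PCI MMIO writes are posted, so without the read a write can linger in a bridge buffer; reading from the device forces it to complete, which the RTL8139 relies on in a few ordering-sensitive spots. A generic sketch of the idiom (foo_writel_flush() is an illustrative name):

    #include <linux/io.h>
    #include <linux/types.h>

    /* write a register, then read it back to flush posted PCI writes */
    static inline void foo_writel_flush(void __iomem *ioaddr, unsigned int reg, u32 val)
    {
            writel(val, ioaddr + reg);
            (void)readl(ioaddr + reg);      /* the read forces the posted write out */
    }
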
@@ -570,14 +592,14 @@ static const u16 netdrv_intr_mask =
TxErr | TxOK | RxErr | RxOK;
static const unsigned int netdrv_rx_config =
- RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
- (RX_FIFO_THRESH << RxCfgFIFOShift) |
- (RX_DMA_BURST << RxCfgDMAShift);
+ RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
-static int __devinit netdrv_init_board (struct pci_dev *pdev,
- struct net_device **dev_out,
- void **ioaddr_out)
+static int __devinit netdrv_init_board(struct pci_dev *pdev,
+ struct net_device **dev_out,
+ void **ioaddr_out)
{
void *ioaddr = NULL;
struct net_device *dev;
@@ -587,43 +609,43 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
u32 tmp;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (pdev != NULL);
- assert (ioaddr_out != NULL);
+ assert(pdev != NULL);
+ assert(ioaddr_out != NULL);
*ioaddr_out = NULL;
*dev_out = NULL;
/* dev zeroed in alloc_etherdev */
- dev = alloc_etherdev (sizeof (*tp));
+ dev = alloc_etherdev(sizeof(*tp));
if (dev == NULL) {
dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- DPRINTK ("EXIT, returning -ENOMEM\n");
+ DPRINTK("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
- /* enable device (incl. PCI PM wakeup), and bus-mastering */
- rc = pci_enable_device (pdev);
+ /* enable device(incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
if (rc)
goto err_out;
- pio_start = pci_resource_start (pdev, 0);
- pio_end = pci_resource_end (pdev, 0);
- pio_flags = pci_resource_flags (pdev, 0);
- pio_len = pci_resource_len (pdev, 0);
+ pio_start = pci_resource_start(pdev, 0);
+ pio_end = pci_resource_end(pdev, 0);
+ pio_flags = pci_resource_flags(pdev, 0);
+ pio_len = pci_resource_len(pdev, 0);
- mmio_start = pci_resource_start (pdev, 1);
- mmio_end = pci_resource_end (pdev, 1);
- mmio_flags = pci_resource_flags (pdev, 1);
- mmio_len = pci_resource_len (pdev, 1);
+ mmio_start = pci_resource_start(pdev, 1);
+ mmio_end = pci_resource_end(pdev, 1);
+ mmio_flags = pci_resource_flags(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
/* set this immediately, we need to know before
* we talk to the chip directly */
- DPRINTK("PIO region size == 0x%02X\n", pio_len);
- DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+ DPRINTK("PIO region size == %#02X\n", pio_len);
+ DPRINTK("MMIO region size == %#02lX\n", mmio_len);
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
@@ -647,17 +669,17 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
goto err_out;
}
- rc = pci_request_regions (pdev, MODNAME);
+ rc = pci_request_regions(pdev, MODNAME);
if (rc)
goto err_out;
- pci_set_master (pdev);
+ pci_set_master(pdev);
#ifdef USE_IO_OPS
- ioaddr = (void *) pio_start;
+ ioaddr = (void *)pio_start;
#else
/* ioremap MMIO region */
- ioaddr = ioremap (mmio_start, mmio_len);
+ ioaddr = ioremap(mmio_start, mmio_len);
if (ioaddr == NULL) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
@@ -666,52 +688,50 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
#endif /* USE_IO_OPS */
/* Soft reset the chip. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
- if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
break;
else
- udelay (10);
+ udelay(10);
/* Bring the chip out of low-power mode. */
/* <insert device-specific code here> */
#ifndef USE_IO_OPS
/* sanity checks -- ensure PIO and MMIO registers agree */
- assert (inb (pio_start+Config0) == readb (ioaddr+Config0));
- assert (inb (pio_start+Config1) == readb (ioaddr+Config1));
- assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig));
- assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig));
+ assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
+ assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
+ assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
+ assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
#endif /* !USE_IO_OPS */
/* identify chip attached to board */
- tmp = NETDRV_R8 (ChipVersion);
- for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--)
+ tmp = NETDRV_R8(ChipVersion);
+ for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
if (tmp == rtl_chip_info[i].version) {
tp->chipset = i;
goto match;
}
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
- dev_printk (KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming RTL-8139\n");
- dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n",
- NETDRV_R32 (TxConfig));
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "unknown chip version, assuming RTL-8139\n");
+ dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
+ NETDRV_R32(TxConfig));
tp->chipset = 0;
match:
- DPRINTK ("chipset id (%d) == index %d, '%s'\n",
- tmp,
- tp->chipset,
- rtl_chip_info[tp->chipset].name);
+ DPRINTK("chipset id(%d) == index %d, '%s'\n",
+ tmp, tp->chipset, rtl_chip_info[tp->chipset].name);
- rc = register_netdev (dev);
+ rc = register_netdev(dev);
if (rc)
goto err_out_unmap;
- DPRINTK ("EXIT, returning 0\n");
+ DPRINTK("EXIT, returning 0\n");
*ioaddr_out = ioaddr;
*dev_out = dev;
return 0;
@@ -721,10 +741,10 @@ err_out_unmap:
iounmap(ioaddr);
err_out_free_res:
#endif
- pci_release_regions (pdev);
+ pci_release_regions(pdev);
err_out:
- free_netdev (dev);
- DPRINTK ("EXIT, returning %d\n", rc);
+ free_netdev(dev);
+ DPRINTK("EXIT, returning %d\n", rc);
return rc;
}
@@ -740,8 +760,8 @@ static const struct net_device_ops netdrv_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit netdrv_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit netdrv_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct netdrv_private *tp;
@@ -756,29 +776,29 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
printk(version);
#endif
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (pdev != NULL);
- assert (ent != NULL);
+ assert(pdev != NULL);
+ assert(ent != NULL);
board_idx++;
- i = netdrv_init_board (pdev, &dev, &ioaddr);
+ i = netdrv_init_board(pdev, &dev, &ioaddr);
if (i < 0) {
- DPRINTK ("EXIT, returning %d\n", i);
+ DPRINTK("EXIT, returning %d\n", i);
return i;
}
tp = netdev_priv(dev);
- assert (ioaddr != NULL);
- assert (dev != NULL);
- assert (tp != NULL);
+ assert(ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
- addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((u16 *) (dev->dev_addr))[i] =
- le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+ ((u16 *)(dev->dev_addr))[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));
dev->netdev_ops = &netdrv_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
@@ -791,7 +811,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
/* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | NETDRV_CAPS;
+ PCI_COMMAND_MASTER | NETDRV_CAPS;
tp->pci_dev = pdev;
tp->board = ent->driver_data;
tp->mmio_addr = ioaddr;
@@ -801,18 +821,15 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
tp->phys[0] = 32;
- printk (KERN_INFO "%s: %s at 0x%lx, %pM IRQ %d\n",
- dev->name,
- board_info[ent->driver_data].name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
+ board_info[ent->driver_data].name,
+ dev->base_addr, dev->dev_addr, dev->irq);
- printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
- dev->name, rtl_chip_info[tp->chipset].name);
+ netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
+ rtl_chip_info[tp->chipset].name);
/* Put the chip into low-power mode. */
- NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
/* The lower four bits are the media type. */
option = (board_idx > 7) ? 0 : media[board_idx];
@@ -824,45 +841,43 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
}
if (tp->full_duplex) {
- printk (KERN_INFO
- "%s: Media type forced to Full Duplex.\n",
- dev->name);
- mdio_write (dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ netdev_info(dev, "Media type forced to Full Duplex\n");
+ mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
tp->duplex_lock = 1;
}
- DPRINTK ("EXIT - returning 0\n");
+ DPRINTK("EXIT - returning 0\n");
return 0;
}
-static void __devexit netdrv_remove_one (struct pci_dev *pdev)
+static void __devexit netdrv_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct netdrv_private *np;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (dev != NULL);
+ assert(dev != NULL);
np = netdev_priv(dev);
- assert (np != NULL);
+ assert(np != NULL);
- unregister_netdev (dev);
+ unregister_netdev(dev);
#ifndef USE_IO_OPS
- iounmap (np->mmio_addr);
+ iounmap(np->mmio_addr);
#endif /* !USE_IO_OPS */
- pci_release_regions (pdev);
+ pci_release_regions(pdev);
- free_netdev (dev);
+ free_netdev(dev);
- pci_set_drvdata (pdev, NULL);
+ pci_set_drvdata(pdev, NULL);
- pci_disable_device (pdev);
+ pci_disable_device(pdev);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
@@ -870,63 +885,63 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
-#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
-#define EE_WRITE_0 0x00
-#define EE_WRITE_1 0x02
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
-#define EE_ENB (0x80 | EE_CS)
+#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
- */
+*/
#define eeprom_delay() readl(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD (5)
-#define EE_READ_CMD (6)
+#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
-static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
{
int i;
unsigned retval = 0;
void *ee_addr = ioaddr + Cfg9346;
int read_cmd = location | (EE_READ_CMD << addr_len);
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- writeb (EE_ENB & ~EE_CS, ee_addr);
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB & ~EE_CS, ee_addr);
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
/* Shift the read command bits out. */
for (i = 4 + addr_len; i >= 0; i--) {
int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- writeb (EE_ENB | dataval, ee_addr);
- eeprom_delay ();
- writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
}
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
for (i = 16; i > 0; i--) {
- writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
retval =
- (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
- 0);
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ (retval << 1) | ((readb(ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
}
/* Terminate the EEPROM access. */
- writeb (~EE_CS, ee_addr);
- eeprom_delay ();
+ writeb(~EE_CS, ee_addr);
+ eeprom_delay();
- DPRINTK ("EXIT - returning %d\n", retval);
+ DPRINTK("EXIT - returning %d\n", retval);
return retval;
}
@@ -936,12 +951,12 @@ static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
-#define MDIO_DIR 0x80
+#define MDIO_DIR 0x80
#define MDIO_DATA_OUT 0x04
#define MDIO_DATA_IN 0x02
-#define MDIO_CLK 0x01
-#define MDIO_WRITE0 (MDIO_DIR)
-#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
#define mdio_delay() readb(mdio_addr)
@@ -959,24 +974,24 @@ static char mii_2_8139_map[8] = {
/* Synchronize the MII management interface by shifting 32 one bits out. */
-static void mdio_sync (void *mdio_addr)
+static void mdio_sync(void *mdio_addr)
{
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
for (i = 32; i >= 0; i--) {
- writeb (MDIO_WRITE1, mdio_addr);
- mdio_delay ();
- writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static int mdio_read (struct net_device *dev, int phy_id, int location)
+static int mdio_read(struct net_device *dev, int phy_id, int location)
{
struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
@@ -984,97 +999,94 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
int retval = 0;
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
- DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ DPRINTK("EXIT after directly using 8139 internal regs\n");
return location < 8 && mii_2_8139_map[location] ?
- readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+ readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
}
- mdio_sync (mdio_addr);
+ mdio_sync(mdio_addr);
/* Shift the read command bits out. */
for (i = 15; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
- writeb (MDIO_DIR | dataval, mdio_addr);
- mdio_delay ();
- writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(MDIO_DIR | dataval, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 19; i > 0; i--) {
- writeb (0, mdio_addr);
- mdio_delay ();
- retval =
- (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1
- : 0);
- writeb (MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) |
+ ((readb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ writeb(MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff);
+ DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
return (retval >> 1) & 0xffff;
}
-static void mdio_write (struct net_device *dev, int phy_id, int location,
- int value)
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
{
struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd =
- (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
if (location < 8 && mii_2_8139_map[location]) {
- writew (value,
- tp->mmio_addr + mii_2_8139_map[location]);
- readw (tp->mmio_addr + mii_2_8139_map[location]);
+ writew(value,
+ tp->mmio_addr + mii_2_8139_map[location]);
+ readw(tp->mmio_addr + mii_2_8139_map[location]);
}
- DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ DPRINTK("EXIT after directly using 8139 internal regs\n");
return;
}
- mdio_sync (mdio_addr);
+ mdio_sync(mdio_addr);
/* Shift the command bits out. */
for (i = 31; i >= 0; i--) {
int dataval =
- (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writeb (dataval, mdio_addr);
- mdio_delay ();
- writeb (dataval | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ writeb(dataval, mdio_addr);
+ mdio_delay();
+ writeb(dataval | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
- writeb (0, mdio_addr);
- mdio_delay ();
- writeb (MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(0, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static int netdrv_open (struct net_device *dev)
+static int netdrv_open(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
int retval;
-#ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr;
-#endif
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- retval = request_irq (dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
+ retval = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
- DPRINTK ("EXIT, returning %d\n", retval);
+ DPRINTK("EXIT, returning %d\n", retval);
return retval;
}
@@ -1092,7 +1104,7 @@ static int netdrv_open (struct net_device *dev)
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
- DPRINTK ("EXIT, returning -ENOMEM\n");
+ DPRINTK("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
@@ -1100,109 +1112,108 @@ static int netdrv_open (struct net_device *dev)
tp->full_duplex = tp->duplex_lock;
tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
- netdrv_init_ring (dev);
- netdrv_hw_start (dev);
+ netdrv_init_ring(dev);
+ netdrv_hw_start(dev);
- DPRINTK ("%s: netdrv_open() ioaddr %#lx IRQ %d"
- " GP Pins %2.2x %s-duplex.\n",
- dev->name, pci_resource_start (tp->pci_dev, 1),
- dev->irq, NETDRV_R8 (MediaStatus),
- tp->full_duplex ? "full" : "half");
+ netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
+ (unsigned long long)pci_resource_start(tp->pci_dev, 1),
+ dev->irq, NETDRV_R8(MediaStatus),
+ tp->full_duplex ? "full" : "half");
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
- init_timer (&tp->timer);
+ init_timer(&tp->timer);
tp->timer.expires = jiffies + 3 * HZ;
tp->timer.data = (unsigned long) dev;
tp->timer.function = &netdrv_timer;
- add_timer (&tp->timer);
+ add_timer(&tp->timer);
- DPRINTK ("EXIT, returning 0\n");
+ DPRINTK("EXIT, returning 0\n");
return 0;
}
/* Start the hardware at open or resume. */
-static void netdrv_hw_start (struct net_device *dev)
+static void netdrv_hw_start(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
/* Soft reset the chip. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
- udelay (100);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
+ udelay(100);
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
- if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
break;
/* Restore our idea of the MAC address. */
- NETDRV_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
- NETDRV_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+ NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
+ NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
/* Must enable Tx/Rx before setting transfer thresholds! */
- NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
i = netdrv_rx_config |
- (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F (RxConfig, i);
+ (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F(RxConfig, i);
/* Check this value: the documentation for IFG contradicts itself. */
- NETDRV_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
+ NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
/* unlock Config[01234] and BMCR register writes */
- NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
- udelay (10);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
+ udelay(10);
tp->cur_rx = 0;
/* Lock Config[01234] and BMCR register writes */
- NETDRV_W8_F (Cfg9346, Cfg9346_Lock);
- udelay (10);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
+ udelay(10);
/* init Rx ring buffer DMA address */
- NETDRV_W32_F (RxBuf, tp->rx_ring_dma);
+ NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
- NETDRV_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+ NETDRV_W32_F(TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
- NETDRV_W32_F (RxMissed, 0);
+ NETDRV_W32_F(RxMissed, 0);
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
/* no early-rx interrupts */
- NETDRV_W16 (MultiIntr, NETDRV_R16 (MultiIntr) & MultiIntrClear);
+ NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
/* make sure RxTx has started */
- NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
/* Enable all known interrupts by setting the interrupt mask. */
- NETDRV_W16_F (IntrMask, netdrv_intr_mask);
+ NETDRV_W16_F(IntrMask, netdrv_intr_mask);
- netif_start_queue (dev);
+ netif_start_queue(dev);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void netdrv_init_ring (struct net_device *dev)
+static void netdrv_init_ring(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
tp->cur_rx = 0;
- atomic_set (&tp->cur_tx, 0);
- atomic_set (&tp->dirty_tx, 0);
+ atomic_set(&tp->cur_tx, 0);
+ atomic_set(&tp->dirty_tx, 0);
for (i = 0; i < NUM_TX_DESC; i++) {
tp->tx_info[i].skb = NULL;
@@ -1210,11 +1221,11 @@ static void netdrv_init_ring (struct net_device *dev)
tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static void netdrv_timer (unsigned long data)
+static void netdrv_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct netdrv_private *tp = netdev_priv(dev);
@@ -1222,58 +1233,54 @@ static void netdrv_timer (unsigned long data)
int next_tick = 60 * HZ;
int mii_lpa;
- mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+ mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
if (!tp->duplex_lock && mii_lpa != 0xffff) {
int duplex = ((mii_lpa & LPA_100FULL) ||
- (mii_lpa & 0x01C0) == 0x0040);
+ (mii_lpa & 0x01C0) == 0x0040);
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
- printk (KERN_INFO
- "%s: Setting %s-duplex based on MII #%d link"
- " partner ability of %4.4x.\n", dev->name,
- tp->full_duplex ? "full" : "half",
- tp->phys[0], mii_lpa);
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
}
}
- DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
- dev->name, NETDRV_R16 (NWayLPAR));
- DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x"
- " RxStatus %4.4x.\n", dev->name,
- NETDRV_R16 (IntrMask),
- NETDRV_R16 (IntrStatus),
- NETDRV_R32 (RxEarlyStatus));
- DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
- dev->name, NETDRV_R8 (Config0),
- NETDRV_R8 (Config1));
+ netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
+ NETDRV_R16(NWayLPAR));
+ netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
+ NETDRV_R16(IntrMask),
+ NETDRV_R16(IntrStatus),
+ NETDRV_R32(RxEarlyStatus));
+ netdev_dbg(dev, "Chip config %02x %02x\n",
+ NETDRV_R8(Config0), NETDRV_R8(Config1));
tp->timer.expires = jiffies + next_tick;
- add_timer (&tp->timer);
+ add_timer(&tp->timer);
}
-static void netdrv_tx_clear (struct net_device *dev)
+static void netdrv_tx_clear(struct net_device *dev)
{
int i;
struct netdrv_private *tp = netdev_priv(dev);
- atomic_set (&tp->cur_tx, 0);
- atomic_set (&tp->dirty_tx, 0);
+ atomic_set(&tp->cur_tx, 0);
+ atomic_set(&tp->dirty_tx, 0);
/* Dump the unsent Tx packets. */
for (i = 0; i < NUM_TX_DESC; i++) {
struct ring_info *rp = &tp->tx_info[i];
if (rp->mapping != 0) {
- pci_unmap_single (tp->pci_dev, rp->mapping,
- rp->skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(tp->pci_dev, rp->mapping,
+ rp->skb->len, PCI_DMA_TODEVICE);
rp->mapping = 0;
}
if (rp->skb) {
- dev_kfree_skb (rp->skb);
+ dev_kfree_skb(rp->skb);
rp->skb = NULL;
dev->stats.tx_dropped++;
}
@@ -1281,7 +1288,7 @@ static void netdrv_tx_clear (struct net_device *dev)
}
-static void netdrv_tx_timeout (struct net_device *dev)
+static void netdrv_tx_timeout(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
@@ -1289,96 +1296,95 @@ static void netdrv_tx_timeout (struct net_device *dev)
u8 tmp8;
unsigned long flags;
- DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
- "media %2.2x.\n", dev->name,
- NETDRV_R8 (ChipCmd),
- NETDRV_R16 (IntrStatus),
- NETDRV_R8 (MediaStatus));
+ netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
+ NETDRV_R8(ChipCmd),
+ NETDRV_R16(IntrStatus),
+ NETDRV_R8(MediaStatus));
/* disable Tx ASAP, if not already */
- tmp8 = NETDRV_R8 (ChipCmd);
+ tmp8 = NETDRV_R8(ChipCmd);
if (tmp8 & CmdTxEnb)
- NETDRV_W8 (ChipCmd, tmp8 & ~CmdTxEnb);
+ NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
/* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16 (IntrMask, 0x0000);
+ NETDRV_W16(IntrMask, 0x0000);
/* Emit info to figure out what went wrong. */
- printk (KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d.\n",
- dev->name, atomic_read (&tp->cur_tx),
- atomic_read (&tp->dirty_tx));
+ netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
+ atomic_read(&tp->cur_tx),
+ atomic_read(&tp->dirty_tx));
for (i = 0; i < NUM_TX_DESC; i++)
- printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
- dev->name, i, NETDRV_R32 (TxStatus0 + (i * 4)),
- i == atomic_read (&tp->dirty_tx) % NUM_TX_DESC ?
- " (queue head)" : "");
+ netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+ i, NETDRV_R32(TxStatus0 + (i * 4)),
+ i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
+ "(queue head)" : "");
/* Stop a shared interrupt from scavenging while we are. */
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
- netdrv_tx_clear (dev);
+ netdrv_tx_clear(dev);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
/* ...and finally, reset everything */
- netdrv_hw_start (dev);
+ netdrv_hw_start(dev);
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
-static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
+static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int entry;
/* Calculate the next Tx descriptor entry. */
- entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
+ entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
- assert (tp->tx_info[entry].skb == NULL);
- assert (tp->tx_info[entry].mapping == 0);
+ assert(tp->tx_info[entry].skb == NULL);
+ assert(tp->tx_info[entry].mapping == 0);
tp->tx_info[entry].skb = skb;
/* tp->tx_info[entry].mapping = 0; */
skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
/* Note: the chip doesn't have auto-pad! */
- NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
- tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
+ tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
dev->trans_start = jiffies;
- atomic_inc (&tp->cur_tx);
- if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
- netif_stop_queue (dev);
+ atomic_inc(&tp->cur_tx);
+ if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
+ netif_stop_queue(dev);
- DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
- dev->name, skb->data, skb->len, entry);
+ netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
+ skb->data, skb->len, entry);
return NETDEV_TX_OK;
}
-static void netdrv_tx_interrupt (struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr)
+static void netdrv_tx_interrupt(struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr)
{
int cur_tx, dirty_tx, tx_left;
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
- dirty_tx = atomic_read (&tp->dirty_tx);
+ dirty_tx = atomic_read(&tp->dirty_tx);
- cur_tx = atomic_read (&tp->cur_tx);
+ cur_tx = atomic_read(&tp->cur_tx);
tx_left = cur_tx - dirty_tx;
while (tx_left > 0) {
int entry = dirty_tx % NUM_TX_DESC;
int txstatus;
- txstatus = NETDRV_R32 (TxStatus0 + (entry * sizeof (u32)));
+ txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
break; /* It still hasn't been Txed */
@@ -1386,12 +1392,12 @@ static void netdrv_tx_interrupt (struct net_device *dev,
/* Note: TxCarrierLost is always asserted at 100mbps. */
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was a major error, log it. */
- DPRINTK ("%s: Transmit error, Tx status %8.8x.\n",
- dev->name, txstatus);
+ netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
+ txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
dev->stats.tx_aborted_errors++;
- NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
+ NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
}
if (txstatus & TxCarrierLost)
dev->stats.tx_carrier_errors++;
@@ -1417,48 +1423,45 @@ static void netdrv_tx_interrupt (struct net_device *dev,
PCI_DMA_TODEVICE);
tp->tx_info[entry].mapping = 0;
}
- dev_kfree_skb_irq (tp->tx_info[entry].skb);
+ dev_kfree_skb_irq(tp->tx_info[entry].skb);
tp->tx_info[entry].skb = NULL;
dirty_tx++;
if (dirty_tx < 0) { /* handle signed int overflow */
- atomic_sub (cur_tx, &tp->cur_tx); /* XXX racy? */
+ atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
dirty_tx = cur_tx - tx_left + 1;
}
- if (netif_queue_stopped (dev))
- netif_wake_queue (dev);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
- cur_tx = atomic_read (&tp->cur_tx);
+ cur_tx = atomic_read(&tp->cur_tx);
tx_left = cur_tx - dirty_tx;
}
#ifndef NETDRV_NDEBUG
- if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
- printk (KERN_ERR
- "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, atomic_read (&tp->cur_tx));
+ if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
+ netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, atomic_read(&tp->cur_tx));
dirty_tx += NUM_TX_DESC;
}
#endif /* NETDRV_NDEBUG */
- atomic_set (&tp->dirty_tx, dirty_tx);
+ atomic_set(&tp->dirty_tx, dirty_tx);
}
/* TODO: clean this up! Rx reset need not be this intensive */
-static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
+static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
{
u8 tmp8;
int tmp_work = 1000;
- DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n",
- dev->name, rx_status);
- if (rx_status & RxTooLong) {
- DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
- dev->name, rx_status);
+ netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
+ if (rx_status & RxTooLong)
+ netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
+ rx_status);
/* A.C.: The chip hangs here. */
- }
dev->stats.rx_errors++;
if (rx_status & (RxBadSymbol | RxBadAlign))
dev->stats.rx_frame_errors++;
@@ -1466,56 +1469,55 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
dev->stats.rx_length_errors++;
if (rx_status & RxCRCErr)
dev->stats.rx_crc_errors++;
- /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
tp->cur_rx = 0;
/* disable receive */
- tmp8 = NETDRV_R8 (ChipCmd) & ChipCmdClear;
- NETDRV_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+ tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
+ NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
/* A.C.: Reset the multicast list. */
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
/* XXX potentially temporary hack to
* restart hung receiver */
while (--tmp_work > 0) {
- tmp8 = NETDRV_R8 (ChipCmd);
+ tmp8 = NETDRV_R8(ChipCmd);
if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
break;
- NETDRV_W8_F (ChipCmd,
- (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd,
+ (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
}
/* G.S.: Re-enable receiver */
/* XXX temporary hack to work around receiver hang */
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
if (tmp_work <= 0)
- printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+ netdev_warn(dev, "tx/rx enable wait too long\n");
}
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
field alignments and semantics. */
-static void netdrv_rx_interrupt (struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
+static void netdrv_rx_interrupt(struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
{
unsigned char *rx_ring;
u16 cur_rx;
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
rx_ring = tp->rx_ring;
cur_rx = tp->cur_rx;
- DPRINTK ("%s: In netdrv_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- NETDRV_R16 (RxBufAddr),
- NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+ netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ cur_rx, NETDRV_R16(RxBufAddr),
+ NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
- while ((NETDRV_R8 (ChipCmd) & RxBufEmpty) == 0) {
+ while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
int ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
unsigned int rx_size;
@@ -1523,32 +1525,25 @@ static void netdrv_rx_interrupt (struct net_device *dev,
struct sk_buff *skb;
/* read size+status of next frame from DMA ring buffer */
- rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+ rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;
- DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
- " cur %4.4x.\n", dev->name, rx_status,
- rx_size, cur_rx);
+ netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
+ rx_status, rx_size, cur_rx);
#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
- {
- int i;
- DPRINTK ("%s: Frame contents ", dev->name);
- for (i = 0; i < 70; i++)
- printk (" %2.2x",
- rx_ring[ring_offset + i]);
- printk (".\n");
- }
+ print_hex_dump_bytes("Frame contents: ", DUMP_PREFIX_OFFSET,
+ &rx_ring[ring_offset], 70);
#endif
/* If Rx err or invalid rx_size/rx_status received
- * (which happens if we get lost in the ring),
+ * (which happens if we get lost in the ring),
* Rx process gets reset, so we abort any further
* Rx processing.
*/
if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
(!(rx_status & RxStatusOK))) {
- netdrv_rx_err (rx_status, dev, tp, ioaddr);
+ netdrv_rx_err(rx_status, dev, tp, ioaddr);
return;
}
@@ -1561,71 +1556,67 @@ static void netdrv_rx_interrupt (struct net_device *dev,
* drop packets here under memory pressure.
*/
- skb = dev_alloc_skb (pkt_size + 2);
+ skb = dev_alloc_skb(pkt_size + 2);
if (skb) {
- skb_reserve (skb, 2); /* 16 byte align the IP fields. */
+ skb_reserve(skb, 2); /* 16 byte align the IP fields. */
- skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
- skb_put (skb, pkt_size);
+ skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
+ skb_put(skb, pkt_size);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_bytes += pkt_size;
dev->stats.rx_packets++;
} else {
- printk (KERN_WARNING
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
- NETDRV_W16_F (RxBufPtr, cur_rx - 16);
+ NETDRV_W16_F(RxBufPtr, cur_rx - 16);
}
- DPRINTK ("%s: Done netdrv_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- NETDRV_R16 (RxBufAddr),
- NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+ netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ cur_rx, NETDRV_R16(RxBufAddr),
+ NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
tp->cur_rx = cur_rx;
}
-static void netdrv_weird_interrupt (struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr,
- int status, int link_changed)
+static void netdrv_weird_interrupt(struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr,
+ int status, int link_changed)
{
- printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
+ status);
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
/* Update the error count. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
if ((status & RxUnderrun) && link_changed &&
(tp->drv_flags & HAS_LNK_CHNG)) {
/* Really link-change on new chips. */
- int lpar = NETDRV_R16 (NWayLPAR);
+ int lpar = NETDRV_R16(NWayLPAR);
int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
- tp->duplex_lock);
+ tp->duplex_lock);
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
}
status &= ~RxUnderrun;
}
/* XXX along with netdrv_rx_err, are we double-counting errors? */
- if (status &
- (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+ if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
dev->stats.rx_errors++;
if (status & (PCSTimeout))
@@ -1634,22 +1625,21 @@ static void netdrv_weird_interrupt (struct net_device *dev,
dev->stats.rx_fifo_errors++;
if (status & RxOverflow) {
dev->stats.rx_over_errors++;
- tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN;
- NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16);
+ tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
+ NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
}
if (status & PCIErr) {
u16 pci_cmd_status;
- pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+ pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
- printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
- dev->name, pci_cmd_status);
+ netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
}
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
-static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
+static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
struct netdrv_private *tp = netdev_priv(dev);
@@ -1658,22 +1648,21 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
int handled = 0;
- spin_lock (&tp->lock);
+ spin_lock(&tp->lock);
do {
- status = NETDRV_R16 (IntrStatus);
+ status = NETDRV_R16(IntrStatus);
- /* h/w no longer present (hotplug?) or major error, bail */
+ /* h/w no longer present (hotplug?) or major error, bail */
if (status == 0xFFFF)
break;
handled = 1;
/* Acknowledge all of the current interrupt sources ASAP */
- NETDRV_W16_F (IntrStatus, status);
+ NETDRV_W16_F(IntrStatus, status);
- DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
- dev->name, status,
- NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
+ status, NETDRV_R16(IntrStatus));
if ((status &
(PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
@@ -1682,69 +1671,67 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
/* Check uncommon events with one test. */
if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
- RxFIFOOver | TxErr | RxErr))
- netdrv_weird_interrupt (dev, tp, ioaddr,
- status, link_changed);
+ RxFIFOOver | TxErr | RxErr))
+ netdrv_weird_interrupt(dev, tp, ioaddr,
+ status, link_changed);
if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
- netdrv_rx_interrupt (dev, tp, ioaddr);
+ netdrv_rx_interrupt(dev, tp, ioaddr);
if (status & (TxOK | TxErr))
- netdrv_tx_interrupt (dev, tp, ioaddr);
+ netdrv_tx_interrupt(dev, tp, ioaddr);
boguscnt--;
} while (boguscnt > 0);
if (boguscnt <= 0) {
- printk (KERN_WARNING
- "%s: Too much work at interrupt, "
- "IntrStatus=0x%4.4x.\n", dev->name,
- status);
+ netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
+ status);
/* Clear all interrupt sources. */
- NETDRV_W16 (IntrStatus, 0xffff);
+ NETDRV_W16(IntrStatus, 0xffff);
}
- spin_unlock (&tp->lock);
+ spin_unlock(&tp->lock);
- DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
- dev->name, NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
+ NETDRV_R16(IntrStatus));
return IRQ_RETVAL(handled);
}
-static int netdrv_close (struct net_device *dev)
+static int netdrv_close(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
- dev->name, NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
+ NETDRV_R16(IntrStatus));
- del_timer_sync (&tp->timer);
+ del_timer_sync(&tp->timer);
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
/* Stop the chip's Tx and Rx DMA processes. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
/* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16 (IntrMask, 0x0000);
+ NETDRV_W16(IntrMask, 0x0000);
/* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
- free_irq (dev->irq, dev);
+ free_irq(dev->irq, dev);
- netdrv_tx_clear (dev);
+ netdrv_tx_clear(dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
@@ -1754,23 +1741,23 @@ static int netdrv_close (struct net_device *dev)
tp->tx_bufs = NULL;
/* Green! Put the chip in low-power mode. */
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, 0x03);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, 0x03);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
return 0;
}
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags;
int rc = 0;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -1778,15 +1765,15 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
/* Fall Through */
case SIOCGMIIREG: /* Read MII PHY register. */
- spin_lock_irqsave (&tp->lock, flags);
- data->val_out = mdio_read (dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ spin_unlock_irqrestore(&tp->lock, flags);
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- spin_lock_irqsave (&tp->lock, flags);
- mdio_write (dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
+ mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irqrestore(&tp->lock, flags);
break;
default:
@@ -1794,43 +1781,43 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
- DPRINTK ("EXIT, returning %d\n", rc);
+ DPRINTK("EXIT, returning %d\n", rc);
return rc;
}
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
-static void netdrv_set_rx_mode (struct net_device *dev)
+static void netdrv_set_rx_mode(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- DPRINTK ("%s: netdrv_set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
- dev->name, dev->flags, NETDRV_R32 (RxConfig));
+ netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
+ __func__, dev->flags, NETDRV_R32(RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
+
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -1838,66 +1825,66 @@ static void netdrv_set_rx_mode (struct net_device *dev)
}
/* if called from irq handler, lock already acquired */
- if (!in_irq ())
- spin_lock_irq (&tp->lock);
+ if (!in_irq())
+ spin_lock_irq(&tp->lock);
/* We can safely update without stopping the chip. */
tmp = netdrv_rx_config | rx_mode |
- (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F (RxConfig, tmp);
- NETDRV_W32_F (MAR0 + 0, mc_filter[0]);
- NETDRV_W32_F (MAR0 + 4, mc_filter[1]);
+ (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F(RxConfig, tmp);
+ NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
+ NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
- if (!in_irq ())
- spin_unlock_irq (&tp->lock);
+ if (!in_irq())
+ spin_unlock_irq(&tp->lock);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
#ifdef CONFIG_PM
-static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
+static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
if (!netif_running(dev))
return 0;
- netif_device_detach (dev);
+ netif_device_detach(dev);
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
/* Disable interrupts, stop Tx and Rx. */
- NETDRV_W16 (IntrMask, 0x0000);
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+ NETDRV_W16(IntrMask, 0x0000);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
/* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
- pci_save_state (pdev);
- pci_set_power_state (pdev, PCI_D3hot);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int netdrv_resume (struct pci_dev *pdev)
+static int netdrv_resume(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
/*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev))
return 0;
- pci_set_power_state (pdev, PCI_D0);
- pci_restore_state (pdev);
- netif_device_attach (dev);
- netdrv_hw_start (dev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ netif_device_attach(dev);
+ netdrv_hw_start(dev);
return 0;
}
@@ -1917,7 +1904,7 @@ static struct pci_driver netdrv_pci_driver = {
};
-static int __init netdrv_init_module (void)
+static int __init netdrv_init_module(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
@@ -1927,9 +1914,9 @@ static int __init netdrv_init_module (void)
}
-static void __exit netdrv_cleanup_module (void)
+static void __exit netdrv_cleanup_module(void)
{
- pci_unregister_driver (&netdrv_pci_driver);
+ pci_unregister_driver(&netdrv_pci_driver);
}
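The set_rx_mode() hunks in this series (netdrv above, and the 3c574_cs, axnet_cs, fmvj18x_cs and smc91c92_cs changes below) all replace open-coded dev->mc_list walks with the netdev_mc_* helpers. A minimal sketch of the resulting pattern, with a hypothetical helper name and assuming the 64-bit CRC hash filter these drivers use:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* Sketch only: build a 2 x 32-bit multicast hash the way the converted
 * set_rx_mode() implementations do.  dev_mc_list/dmi_addr is still the
 * list representation at this point in the series. */
static void example_build_mc_hash(struct net_device *dev, u32 mc_filter[2])
{
	struct dev_mc_list *mclist;

	mc_filter[0] = mc_filter[1] = 0;

	if (netdev_mc_empty(dev))
		return;			/* nothing to hash */

	netdev_for_each_mc_addr(mclist, dev) {
		int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}
}

In the same spirit, netdev_mc_count(dev) stands in for dev->mc_count in the "too many to filter perfectly" checks.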
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 98938ea9e0bd..757f87bb1db3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
inw(ioaddr + EL3_STATUS));
spin_lock_irqsave(&lp->window_lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO);
outw(0, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
dev->trans_start = jiffies;
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev)
/* BadSSD */ inb(ioaddr + 12);
up = inb(ioaddr + 13);
- dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
-
EL3WINDOW(1);
}
@@ -1148,7 +1151,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 322e11df0097..091e0b00043e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -886,7 +886,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
opts |= RxMulticast;
outw(opts, ioaddr + EL3_CMD);
}
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d11..9f3d593f14ed 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
@@ -779,6 +778,7 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061),
PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
@@ -1065,14 +1065,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ei_local->page_lock, flags);
outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
/*
* Slow phase with lock held.
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
-
ei_local->irqlock = 1;
send_length = max(length, ETH_ZLEN);
@@ -1628,8 +1625,7 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
struct dev_mc_list *dmi;
u32 crc;
- for (dmi=dev->mc_list; dmi; dmi=dmi->next) {
-
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
/*
* The 8390 uses the 6 most significant bits of the
@@ -1655,7 +1651,7 @@ static void do_set_multicast_list(struct net_device *dev)
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
+ if (!netdev_mc_empty(dev))
make_mc_bits(ei_local->mcfilter, dev);
} else {
/* set to accept-all */
@@ -1671,7 +1667,7 @@ static void do_set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
else
outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3fc433..b9dc80b9d04a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
@@ -1186,22 +1187,20 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
- unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(mclist, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 8a5ae3b182ed..c717b143f11a 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr)
for (i = 0; i < 8; i++)
printk(KERN_CONT " %02X", ladrf[i]);
printk(KERN_CONT "\n");
- }
#endif
} /* BuildLAF */
@@ -1476,14 +1475,13 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- int i;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
@@ -1491,15 +1489,14 @@ static void set_multicast_list(struct net_device *dev)
#endif
/* Set multicast_num_addrs. */
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
/* Set multicast_ladrf. */
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
- dmi = dmi->next;
BuildLAF(lp->multicast_ladrf, adr);
}
}
@@ -1538,15 +1535,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
}
#endif
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
restore_multicast_list(dev);
} /* set_multicast_list */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 92ed3fbf89a5..4c0368de1815 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
@@ -1549,6 +1548,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,8 +1740,8 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
@@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
MODULE_FIRMWARE("cis/PCMLM28.cis");
MODULE_FIRMWARE("cis/DP83903.cis");
MODULE_FIRMWARE("cis/LA-PCM.cis");
-MODULE_FIRMWARE("PE520.cis");
+MODULE_FIRMWARE("cis/PE520.cis");
MODULE_FIRMWARE("cis/NE2K.cis");
MODULE_FIRMWARE("cis/PE-200.cis");
MODULE_FIRMWARE("cis/tamarack.cis");
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 6dd486d2977b..ccc553782a0d 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -453,8 +453,7 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -494,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
{
struct net_device *dev = priv;
cisparse_t parse;
+ u8 *buf;
if (pcmcia_parse_tuple(tuple, &parse))
return -EINVAL;
- if ((parse.version_1.ns > 3) &&
- (cvt_ascii_address(dev,
- (parse.version_1.str + parse.version_1.ofs[3]))))
+ buf = parse.version_1.str + parse.version_1.ofs[3];
+
+ if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
return 0;
return -EINVAL;
@@ -529,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x81, &buf);
if (buf && len >= 13) {
buf[12] = '\0';
- if (cvt_ascii_address(dev, buf))
+ if (cvt_ascii_address(dev, buf) == 0)
rc = 0;
}
kfree(buf);
@@ -652,8 +652,7 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 64;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -912,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (i != 0) {
printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
- goto config_undo;
+ goto config_failed;
}
smc->duplex = 0;
@@ -1000,6 +999,7 @@ config_undo:
unregister_netdev(dev);
config_failed:
smc91c92_release(link);
+ free_netdev(dev);
return -ENODEV;
} /* smc91c92_config */
@@ -1595,27 +1595,6 @@ static void smc_rx(struct net_device *dev)
/*======================================================================
- Calculate values for the hardware multicast filter hash table.
-
-======================================================================*/
-
-static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
- u_char *multicast_table)
-{
- struct dev_mc_list *mc_addr;
-
- for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
-#ifndef final_version /* Verify multicast address. */
- if ((mc_addr->dmi_addr[0] & 1) == 0)
- continue;
-#endif
- multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
- }
-}
-
-/*======================================================================
-
Set the receive mode.
This routine is used by both the protocol level to notify us of
@@ -1629,18 +1608,25 @@ static void set_rx_mode(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
- u_int multicast_table[ 2 ] = { 0, };
+ unsigned char multicast_table[8];
unsigned long flags;
u_short rx_cfg_setting;
+ int i;
+
+ memset(multicast_table, 0, sizeof(multicast_table));
if (dev->flags & IFF_PROMISC) {
rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
} else if (dev->flags & IFF_ALLMULTI)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
- if (dev->mc_count) {
- fill_multicast_tbl(dev->mc_count, dev->mc_list,
- (u_char *)multicast_table);
+ if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, dev) {
+ u_int position = ether_crc(6, mc_addr->dmi_addr);
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
}
rx_cfg_setting = RxStripCRC | RxEnable;
}
@@ -1648,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev)
/* Load MC table and Rx setting into the chip without interrupts. */
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
- outl(multicast_table[0], ioaddr + MULTICAST0);
- outl(multicast_table[1], ioaddr + MULTICAST4);
+ for (i = 0; i < 8; i++)
+ outb(multicast_table[i], ioaddr + MULTICAST0 + i);
SMC_SELECT_BANK(0);
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
@@ -1818,23 +1804,30 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(1);
media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
/* Check for pending interrupt with watchdog flag set: with
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ local_irq_save(flags);
smc_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
smc->fast_poll = HZ;
}
if (smc->fast_poll) {
smc->fast_poll--;
smc->media.expires = jiffies + HZ/100;
add_timer(&smc->media);
- SMC_SELECT_BANK(saved_bank);
- spin_unlock_irqrestore(&smc->lock, flags);
return;
}
+ spin_lock_irqsave(&smc->lock, flags);
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id < 0)
goto reschedule;
@@ -1992,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_gset(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2010,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_sset(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2028,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
ret = smc_link_ok(dev);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2070,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int rc = 0;
u16 saved_bank;
unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
SMC_SELECT_BANK(3);
rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return rc;
}
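The smc91c92_cs ethtool and ioctl hunks above switch from spin_lock_irq()/spin_unlock_irq() to the irqsave variants. The difference matters because spin_unlock_irq() unconditionally re-enables interrupts, while irqsave/irqrestore preserves whatever interrupt state the caller had; a minimal sketch with a hypothetical private struct:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_priv {			/* hypothetical, not from the driver */
	spinlock_t lock;
	u32 link_up;
};

static u32 example_get_link(struct example_priv *p)
{
	unsigned long flags;
	u32 link;

	/* Safe whether or not the caller already has interrupts disabled. */
	spin_lock_irqsave(&p->lock, flags);
	link = p->link_up;
	spin_unlock_irqrestore(&p->lock, flags);	/* restores prior IRQ state */

	return link;
}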
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 466fc72698c0..4d1802e457be 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1364,47 +1364,63 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+struct set_address_info {
+ int reg_nr;
+ int page_nr;
+ int mohawk;
+ unsigned int ioaddr;
+};
+
+static void set_address(struct set_address_info *sa_info, char *addr)
+{
+ unsigned int ioaddr = sa_info->ioaddr;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (sa_info->reg_nr > 15) {
+ sa_info->reg_nr = 8;
+ sa_info->page_nr++;
+ SelectPage(sa_info->page_nr);
+ }
+ if (sa_info->mohawk)
+ PutByte(sa_info->reg_nr++, addr[5 - i]);
+ else
+ PutByte(sa_info->reg_nr++, addr[i]);
+ }
+}
+
/****************
* Set all addresses: This first one is the individual address,
* the next 9 addresses are taken from the multicast list and
* the rest is filled with the individual address.
*/
-static void
-set_addresses(struct net_device *dev)
+static void set_addresses(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
- unsigned char *addr;
- int i,j,k,n;
-
- SelectPage(k=0x50);
- for (i=0,j=8,n=0; ; i++, j++) {
- if (i > 5) {
- if (++n > 9)
- break;
- i = 0;
- if (n > 1 && n <= dev->mc_count && dmi) {
- dmi = dmi->next;
- }
- }
- if (j > 15) {
- j = 8;
- k++;
- SelectPage(k);
- }
-
- if (n && n <= dev->mc_count && dmi)
- addr = dmi->dmi_addr;
- else
- addr = dev->dev_addr;
+ unsigned int ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi;
+ struct set_address_info sa_info;
+ int i;
- if (lp->mohawk)
- PutByte(j, addr[5-i]);
- else
- PutByte(j, addr[i]);
- }
- SelectPage(0);
+ /*
+ * Set up the info structure so that the first set_address() call does
+ * SelectPage() with the right page number; hence the +1/-1 initial values below.
+ */
+ sa_info.reg_nr = 15 + 1;
+ sa_info.page_nr = 0x50 - 1;
+ sa_info.mohawk = lp->mohawk;
+ sa_info.ioaddr = ioaddr;
+
+ set_address(&sa_info, dev->dev_addr);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (i++ == 9)
+ break;
+ set_address(&sa_info, dmi->dmi_addr);
+ }
+ while (i++ < 9)
+ set_address(&sa_info, dev->dev_addr);
+ SelectPage(0);
}
/****************
@@ -1424,9 +1440,9 @@ set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* snoop */
PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
- } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* the chip can filter 9 addresses perfectly */
PutByte(XIRCREG42_SWC1, value | 0x01);
SelectPage(0x40);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a35e8f2..084d78dd1637 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -21,6 +21,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
@@ -45,20 +47,21 @@ static const char *const version =
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/irq.h>
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
-static struct pci_device_id pcnet32_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
@@ -82,7 +85,7 @@ static int cards_found;
static unsigned int pcnet32_portlist[] __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0 };
-static int pcnet32_debug = 0;
+static int pcnet32_debug;
static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
static int pcnet32vlb; /* check for VLB cards ? */
@@ -389,7 +392,7 @@ static struct pcnet32_access pcnet32_wio = {
static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}
static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
@@ -401,7 +404,7 @@ static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}
static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
@@ -412,7 +415,7 @@ static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
- return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}
static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
@@ -486,10 +489,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
@@ -497,18 +497,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_tx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -528,15 +524,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
lp->tx_skbuff = new_skb_list;
return;
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_tx_ring:
+free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
new_tx_ring,
new_ring_dma_addr);
- return;
}
/*
@@ -564,10 +559,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
@@ -575,18 +567,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_rx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -598,15 +586,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++ ) {
+ for (; new < size; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
- if (!(rx_skbuff = new_skb_list[new])) {
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
/* keep the original lists and buffers */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
goto free_all_new;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -643,8 +630,8 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_skbuff = new_skb_list;
return;
- free_all_new:
- for (; --new >= lp->rx_ring_size; ) {
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -652,9 +639,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
}
kfree(new_skb_list);
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_rx_ring:
+free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
(1 << size),
@@ -837,16 +824,14 @@ static int pcnet32_set_ringparam(struct net_device *dev,
spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_msg_drv(lp))
- printk(KERN_INFO
- "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
- lp->rx_ring_size, lp->tx_ring_size);
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
return 0;
}
static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
- u8 * data)
+ u8 *data)
{
memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
@@ -870,17 +855,15 @@ static void pcnet32_ethtool_test(struct net_device *dev,
if (test->flags == ETH_TEST_FL_OFFLINE) {
rc = pcnet32_loopback_test(dev, data);
if (rc) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test failed.\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
test->flags |= ETH_TEST_FL_FAILED;
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test passed.\n",
- dev->name);
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: No tests to run (specify 'Offline' on ethtool).",
- dev->name);
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
} /* end pcnet32_ethtool_test */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
@@ -925,40 +908,39 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
/* Initialize Transmit buffers. */
size = data_len + 15;
for (x = 0; x < numbuffs; x++) {
- if (!(skb = dev_alloc_skb(size))) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Cannot allocate skb at line: %d!\n",
- dev->name, __LINE__);
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
goto clean_up;
- } else {
- packet = skb->data;
- skb_put(skb, size); /* create space for data */
- lp->tx_skbuff[x] = skb;
- lp->tx_ring[x].length = cpu_to_le16(-skb->len);
- lp->tx_ring[x].misc = 0;
-
- /* put DA and SA into the skb */
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- /* type */
- *packet++ = 0x08;
- *packet++ = 0x06;
- /* packet number */
- *packet++ = x;
- /* fill packet with data */
- for (i = 0; i < data_len; i++)
- *packet++ = i;
-
- lp->tx_dma_addr[x] =
- pci_map_single(lp->pci_dev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[x].status = cpu_to_le16(status);
}
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
}
x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
@@ -983,9 +965,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
ticks++;
}
if (ticks == 200) {
- if (netif_msg_hw(lp))
- printk("%s: Desc %d failed to reset!\n",
- dev->name, x);
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
break;
}
}
@@ -993,15 +973,14 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
- printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
for (x = 0; x < numbuffs; x++) {
- printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
skb = lp->rx_skbuff[x];
- for (i = 0; i < size; i++) {
- printk("%02x ", *(skb->data + i));
- }
- printk("\n");
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
}
}
@@ -1012,11 +991,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
packet = lp->tx_skbuff[x]->data;
for (i = 0; i < size; i++) {
if (*(skb->data + i) != packet[i]) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error in compare! %2x - %02x %02x\n",
- dev->name, i, *(skb->data + i),
- packet[i]);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
rc = 1;
break;
}
@@ -1024,7 +1001,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
x++;
}
- clean_up:
+clean_up:
*data1 = rc;
pcnet32_purge_tx_ring(dev);
@@ -1043,7 +1020,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
spin_unlock_irqrestore(&lp->lock, flags);
- return (rc);
+ return rc;
} /* end pcnet32_loopback_test */
static void pcnet32_led_blink_callback(struct net_device *dev)
@@ -1055,9 +1032,8 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
int i;
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
@@ -1079,9 +1055,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Save the current value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
regs[i - 4] = a->read_bcr(ioaddr, i);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, jiffies);
@@ -1096,9 +1071,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Restore the original value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, regs[i - 4]);
- }
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1135,10 +1109,8 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
spin_lock_irqsave(&lp->lock, *flags);
ticks++;
if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error getting into suspend!\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
return 0;
}
}
@@ -1183,15 +1155,13 @@ static void pcnet32_rx_entry(struct net_device *dev,
/* Discard oversize frames. */
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Impossible packet size %d!\n",
- dev->name, pkt_len);
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
dev->stats.rx_errors++;
return;
}
if (pkt_len < 60) {
- if (netif_msg_rx_err(lp))
- printk(KERN_ERR "%s: Runt packet!\n", dev->name);
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
dev->stats.rx_errors++;
return;
}
@@ -1199,7 +1169,8 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
- if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
+ newskb = dev_alloc_skb(PKT_BUF_SKB);
+ if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev,
@@ -1217,15 +1188,11 @@ static void pcnet32_rx_entry(struct net_device *dev,
rx_in_place = 1;
} else
skb = NULL;
- } else {
+ } else
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
- }
if (skb == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
@@ -1296,11 +1263,9 @@ static int pcnet32_tx(struct net_device *dev)
/* There was a major error, log it. */
int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
dev->stats.tx_errors++;
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx error status=%04x err_status=%08x\n",
- dev->name, status,
- err_status);
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
if (err_status & 0x04000000)
dev->stats.tx_aborted_errors++;
if (err_status & 0x08000000)
@@ -1312,10 +1277,7 @@ static int pcnet32_tx(struct net_device *dev)
dev->stats.tx_fifo_errors++;
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
#else
@@ -1324,10 +1286,7 @@ static int pcnet32_tx(struct net_device *dev)
if (!lp->dxsuflo) { /* If controller doesn't recover ... */
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
}
@@ -1353,11 +1312,8 @@ static int pcnet32_tx(struct net_device *dev)
delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
if (delta > lp->tx_ring_size) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, lp->cur_tx,
- lp->tx_full);
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
dirty_tx += lp->tx_ring_size;
delta -= lp->tx_ring_size;
}
@@ -1420,7 +1376,7 @@ static int pcnet32_get_regs_len(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
int j = lp->phycount * PCNET32_REGS_PER_PHY;
- return ((PCNET32_NUM_REGS + j) * sizeof(u16));
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
}
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1444,21 +1400,20 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
*buff++ = inw(ioaddr + i);
/* read control and status registers */
- for (i = 0; i < 90; i++) {
+ for (i = 0; i < 90; i++)
*buff++ = a->read_csr(ioaddr, i);
- }
*buff++ = a->read_csr(ioaddr, 112);
*buff++ = a->read_csr(ioaddr, 114);
/* read bus configuration registers */
- for (i = 0; i < 30; i++) {
+ for (i = 0; i < 30; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
+
*buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
- for (i = 31; i < 36; i++) {
+
+ for (i = 31; i < 36; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
/* read mii phy registers */
if (lp->mii) {
@@ -1534,8 +1489,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err < 0) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "failed to enable device -- err=%d\n", err);
+ pr_err("failed to enable device -- err=%d\n", err);
return err;
}
pci_set_master(pdev);
@@ -1543,29 +1497,25 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_resource_start(pdev, 0);
if (!ioaddr) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "card has no PCI IO resources, aborting\n");
+ pr_err("card has no PCI IO resources, aborting\n");
return -ENODEV;
}
if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "architecture does not support 32bit PCI busmaster DMA\n");
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
return -ENODEV;
}
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
- NULL) {
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "io address range already allocated\n");
+ pr_err("io address range already allocated\n");
return -EBUSY;
}
err = pcnet32_probe1(ioaddr, 1, pdev);
- if (err < 0) {
+ if (err < 0)
pci_disable_device(pdev);
- }
+
return err;
}
@@ -1615,7 +1565,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
a = &pcnet32_dwio;
} else {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "No access methods\n");
+ pr_err("No access methods\n");
goto err_release_region;
}
}
@@ -1623,11 +1573,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chip_version =
a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
- printk(KERN_INFO " PCnet chip version is %#x.\n",
- chip_version);
+ pr_info(" PCnet chip version is %#x\n", chip_version);
if ((chip_version & 0xfff) != 0x003) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "Unsupported chip version.\n");
+ pr_info("Unsupported chip version\n");
goto err_release_region;
}
@@ -1680,7 +1629,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (cards_found < MAX_UNITS && homepna[cards_found])
media |= 1; /* switch to home wiring mode */
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
(media & 1) ? "1" : "10");
a->write_bcr(ioaddr, 49, media);
break;
@@ -1696,9 +1645,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
break;
default:
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "PCnet version %#x, no PCnet32 chip.\n",
- chip_version);
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
goto err_release_region;
}
@@ -1720,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Memory allocation failed.\n");
+ pr_err("Memory allocation failed\n");
ret = -ENOMEM;
goto err_release_region;
}
@@ -1729,7 +1677,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+ pr_info("%s at %#3lx,", chipname, ioaddr);
/* In most chips, after a chip reset, the ethernet address is read from the
* station address PROM at the base address and programmed into the
@@ -1754,9 +1702,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" warning: CSR address invalid,\n");
- printk(KERN_INFO
- " using instead PROM address of");
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
}
memcpy(dev->dev_addr, promaddr, 6);
}
@@ -1765,57 +1712,57 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+ memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" %pM", dev->dev_addr);
+ pr_cont(" %pM", dev->dev_addr);
/* Version 0x2623 and 0x2624 */
if (((chip_version + 1) & 0xfffe) == 0x2624) {
i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk(KERN_INFO " tx_start_pt(0x%04x):", i);
+ pr_info(" tx_start_pt(0x%04x):", i);
switch (i >> 10) {
case 0:
- printk(KERN_CONT " 20 bytes,");
+ pr_cont(" 20 bytes,");
break;
case 1:
- printk(KERN_CONT " 64 bytes,");
+ pr_cont(" 64 bytes,");
break;
case 2:
- printk(KERN_CONT " 128 bytes,");
+ pr_cont(" 128 bytes,");
break;
case 3:
- printk(KERN_CONT "~220 bytes,");
+ pr_cont("~220 bytes,");
break;
}
i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(KERN_CONT " BCR18(%x):", i & 0xffff);
+ pr_cont(" BCR18(%x):", i & 0xffff);
if (i & (1 << 5))
- printk(KERN_CONT "BurstWrEn ");
+ pr_cont("BurstWrEn ");
if (i & (1 << 6))
- printk(KERN_CONT "BurstRdEn ");
+ pr_cont("BurstRdEn ");
if (i & (1 << 7))
- printk(KERN_CONT "DWordIO ");
+ pr_cont("DWordIO ");
if (i & (1 << 11))
- printk(KERN_CONT "NoUFlow ");
+ pr_cont("NoUFlow ");
i = a->read_bcr(ioaddr, 25);
- printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 26);
- printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 27);
if (i & (1 << 14))
- printk(KERN_CONT "LowLatRx");
+ pr_cont("LowLatRx");
}
}
dev->base_addr = ioaddr;
lp = netdev_priv(dev);
/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
- if ((lp->init_block =
- pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) {
+ lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
+ &lp->init_dma_addr);
+ if (!lp->init_block) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "Consistent memory allocation failed.\n");
+ pr_err("Consistent memory allocation failed\n");
ret = -ENOMEM;
goto err_free_netdev;
}
@@ -1889,7 +1836,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (pdev) { /* use the IRQ provided by PCI */
dev->irq = pdev->irq;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(" assigned IRQ %d.\n", dev->irq);
+ pr_cont(" assigned IRQ %d\n", dev->irq);
} else {
unsigned long irq_mask = probe_irq_on();
@@ -1905,12 +1852,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev->irq = probe_irq_off(irq_mask);
if (!dev->irq) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line\n");
ret = -ENODEV;
goto err_free_ring;
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", probed IRQ %d.\n", dev->irq);
+ pr_cont(", probed IRQ %d\n", dev->irq);
}
/* Set the mii phy_id so that we can query the link state */
@@ -1934,14 +1881,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->phymask |= (1 << i);
lp->mii_if.phy_id = i;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "Found PHY %04x:%04x at address %d.\n",
- id1, id2, i);
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
}
lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
- if (lp->phycount > 1) {
+ if (lp->phycount > 1)
lp->options |= PCNET32_PORT_MII;
- }
}
init_timer(&lp->watchdog_timer);
@@ -1965,7 +1910,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
cards_found++;
/* enable LED writes */
@@ -1994,10 +1939,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring_size,
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
@@ -2006,46 +1948,35 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring_size,
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->tx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->rx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->tx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->rx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -2114,12 +2045,11 @@ static int pcnet32_open(struct net_device *dev)
/* switch pcnet32 to 32bit mode */
lp->a.write_bcr(ioaddr, 20, 2);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
- dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
- (u32) (lp->rx_ring_dma_addr),
- (u32) (lp->init_dma_addr));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
/* set/reset autoselect bit */
val = lp->a.read_bcr(ioaddr, 2) & ~2;
@@ -2154,10 +2084,8 @@ static int pcnet32_open(struct net_device *dev)
pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (lp->options & PCNET32_PORT_ASEL) {
lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
- if (netif_msg_link(lp))
- printk(KERN_DEBUG
- "%s: Setting 100Mb-Full Duplex.\n",
- dev->name);
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
}
}
if (lp->phycount < 2) {
@@ -2245,9 +2173,7 @@ static int pcnet32_open(struct net_device *dev)
}
}
lp->mii_if.phy_id = first_phy;
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: Using PHY number %d.\n",
- dev->name, first_phy);
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
}
#ifdef DO_DXSUFLO
@@ -2294,18 +2220,17 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
- dev->name, i,
- (u32) (lp->init_dma_addr),
- lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
return 0; /* Always succeed */
- err_free_ring:
+err_free_ring:
/* free any allocated skbuffs */
pcnet32_purge_rx_ring(dev);
@@ -2315,7 +2240,7 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_bcr(ioaddr, 20, 4);
- err_free_irq:
+err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
free_irq(dev->irq, dev);
return rc;
@@ -2366,14 +2291,12 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
if (rx_skbuff == NULL) {
- if (!
- (rx_skbuff = lp->rx_skbuff[i] =
- dev_alloc_skb(PKT_BUF_SKB))) {
- /* there is not much, we can do at this point */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
- dev->name);
+ lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
return -1;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -2423,10 +2346,9 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
break;
- if (i >= 100 && netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_restart timed out waiting for stop.\n",
- dev->name);
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
pcnet32_purge_tx_ring(dev);
if (pcnet32_init_ring(dev))
@@ -2450,8 +2372,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
/* Transmitter timeout, serious problems. */
if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR
- "%s: transmit timed out, status %4.4x, resetting.\n",
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
dev->name, lp->a.read_csr(ioaddr, CSR0));
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
dev->stats.tx_errors++;
@@ -2494,11 +2415,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&lp->lock, flags);
- if (netif_msg_tx_queued(lp)) {
- printk(KERN_DEBUG
- "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
- }
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a.read_csr(ioaddr, CSR0));
/* Default status -- will not enable Successful-TxDone
* interrupt when that option is available to us.
@@ -2557,16 +2476,14 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
while ((csr0 & 0x8f00) && --boguscnt >= 0) {
- if (csr0 == 0xffff) {
+ if (csr0 == 0xffff)
break; /* PCMCIA remove happened */
- }
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG
- "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "interrupt csr0=%#2.2x new csr=%#2.2x\n",
+ csr0, lp->a.read_csr(ioaddr, CSR0));
/* Log misc errors. */
if (csr0 & 0x4000)
@@ -2586,10 +2503,8 @@ pcnet32_interrupt(int irq, void *dev_id)
dev->stats.rx_errors++; /* Missed a Rx frame. */
}
if (csr0 & 0x0800) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
/* unlike for the lance, there is no restart needed */
}
if (napi_schedule_prep(&lp->napi)) {
@@ -2605,9 +2520,9 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
}
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock(&lp->lock);
@@ -2629,10 +2544,9 @@ static int pcnet32_close(struct net_device *dev)
dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
- if (netif_msg_ifdown(lp))
- printk(KERN_DEBUG
- "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a.read_csr(ioaddr, CSR0));
/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
@@ -2676,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2697,9 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
@@ -2729,9 +2642,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
csr15 = lp->a.read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
- if (netif_msg_hw(lp))
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
- dev->name);
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
lp->init_block->mode =
cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
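The multicast hunks above drop the hand-rolled walk over dev->mc_list/mc_count in favour of the netdev_for_each_mc_addr() iterator, which hides the list layout from drivers. A sketch of the iteration shape as it looked in this kernel generation, with the hardware filter update elided and the foo_* name purely illustrative:

#include <linux/netdevice.h>

static void foo_load_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;

	netdev_for_each_mc_addr(dmi, dev) {
		const u8 *addrs = dmi->dmi_addr;

		if (!(*addrs & 1))	/* not a multicast address, skip it */
			continue;
		/* ... hash addrs into the controller's filter here ... */
	}
}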
@@ -2818,10 +2729,8 @@ static int pcnet32_check_otherphy(struct net_device *dev)
mii.phy_id = i;
if (mii_link_ok(&mii)) {
/* found PHY with active link */
- if (netif_msg_link(lp))
- printk(KERN_INFO
- "%s: Using PHY number %d.\n",
- dev->name, i);
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
/* isolate inactive phy */
bmcr =
@@ -2867,8 +2776,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (!curr_link) {
if (prev_link || verbose) {
netif_carrier_off(dev);
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_info(lp, link, dev, "link down\n");
}
if (lp->phycount > 1) {
curr_link = pcnet32_check_otherphy(dev);
@@ -2880,12 +2788,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (netif_msg_link(lp)) {
struct ethtool_cmd ecmd;
mii_ethtool_gset(&lp->mii_if, &ecmd);
- printk(KERN_INFO
- "%s: link up, %sMbps, %s-duplex\n",
- dev->name,
- (ecmd.speed == SPEED_100) ? "100" : "10",
- (ecmd.duplex ==
- DUPLEX_FULL) ? "full" : "half");
+ netdev_info(dev, "link up, %sMbps, %s-duplex\n",
+ (ecmd.speed == SPEED_100)
+ ? "100" : "10",
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
}
bcr9 = lp->a.read_bcr(dev->base_addr, 9);
if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
@@ -2896,8 +2803,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
lp->a.write_bcr(dev->base_addr, 9, bcr9);
}
} else {
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link up\n", dev->name);
+ netif_info(lp, link, dev, "link up\n");
}
}
}
@@ -3009,7 +2915,7 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
@@ -3025,7 +2931,7 @@ static int __init pcnet32_init_module(void)
pcnet32_probe_vlbus(pcnet32_portlist);
if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
- printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+ pr_info("%d cards_found\n", cards_found);
return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
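The pcnet32 hunks above follow one repeating pattern: every open-coded "if (netif_msg_*(lp)) printk(KERN_*, "%s: ...", dev->name, ...)" pair becomes a single netif_err()/netif_info()/netif_printk() call, which performs the same msg_enable test and prefixes the device name itself. A minimal sketch of the before/after shape, assuming a private struct with a msg_enable field (the foo_* names are illustrative, not from this driver):

#include <linux/netdevice.h>

struct foo_priv {
	u32 msg_enable;			/* NETIF_MSG_* bitmap read by netif_msg_*() */
};

static void foo_report_oom(struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);

	/* old style: test the bitmap by hand and repeat dev->name */
	if (netif_msg_drv(lp))
		printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name);

	/* new style: same test, same level, device name added for us */
	netif_err(lp, drv, dev, "Memory allocation failed\n");
}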
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index c13cf64095b6..f482fc4f8cf1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -18,9 +18,6 @@
#include <linux/phy.h>
#include <linux/brcmphy.h>
-#define PHY_ID_BCM50610 0x0143bd60
-#define PHY_ID_BCM50610M 0x0143bd70
-#define PHY_ID_BCM57780 0x03625d90
#define BRCM_PHY_MODEL(phydev) \
((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -331,8 +328,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
return;
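The ||-to-&& change above is a logic fix rather than style: "model != A || model != B || model != C" is true for every model (no chip equals all three IDs at once), so the early return always fired and the code below it never ran. With && the function bails out only when the PHY matches none of the tested IDs. A standalone illustration of the predicate, with placeholder IDs:

#include <stdbool.h>

/* true only when id matches none of the supported models */
static bool unsupported_model(unsigned int id,
			      unsigned int a, unsigned int b, unsigned int c)
{
	/* "id != a || id != b || id != c" would be true for every id */
	return id != a && id != b && id != c;
}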
@@ -823,7 +820,7 @@ static struct phy_driver bcm57780_driver = {
};
static struct phy_driver bcmac131_driver = {
- .phy_id = 0x0143bc70,
+ .phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES |
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index a1bd599c8a5b..92282b31d94b 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index d926168bc780..c722e95853ff 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index b031fa21f1aa..7712ebeba9bf 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index e7070515d2e3..1fa4d73c3cca 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -20,6 +20,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/err.h>
+#include <linux/slab.h>
#define MII_REGS_NUM 29
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index af3f1f2a9f87..904208b95d4b 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -13,7 +13,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 4cf3324ba166..057ecaacde6b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df8..64c7fbe0a8e7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -63,6 +62,7 @@
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
@@ -269,6 +269,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+ }
+
+
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
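The new RTBI branch above ends with a soft reset and then busy-polls MII_BMCR until the self-clearing BMCR_RESET bit drops. For illustration only, a bounded variant of that poll (the foo_* name is hypothetical; the loop in the hunk itself waits without a limit):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include <linux/phy.h>

static int foo_phy_soft_reset(struct phy_device *phydev)
{
	int val, err, tries = 100;

	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
	if (err < 0)
		return err;

	do {
		val = phy_read(phydev, MII_BMCR);
		if (val < 0)
			return val;
		udelay(100);
	} while ((val & BMCR_RESET) && --tries);

	return tries ? 0 : -ETIMEDOUT;
}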
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 2576055b350b..19e70d7e27ab 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/mdio-bitbang.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 61a4461cbda5..a872aea4ed74 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -6,6 +6,7 @@
* Copyright (C) 2009 Cavium Networks
*/
+#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd4e8d72dc08..e17b70291bbc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
(phydev->phy_id & phydrv->phy_id_mask));
}
+#ifdef CONFIG_PM
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->dev.driver;
@@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
return true;
}
-/* Suspend and resume. Copied from platform_suspend and
- * platform_resume
- */
-static int mdio_bus_suspend(struct device * dev, pm_message_t state)
+static int mdio_bus_suspend(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ /*
+ * We must stop the state machine manually, otherwise it stops out of
+ * control, possibly with the phydev->lock held. Upon resume, netdev
+ * may call phy routines that try to grab the same lock, and that may
+ * lead to a deadlock.
+ */
+ if (phydev->attached_dev)
+ phy_stop_machine(phydev);
+
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+
return phydrv->suspend(phydev);
}
-static int mdio_bus_resume(struct device * dev)
+static int mdio_bus_resume(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ int ret;
if (!mdio_bus_phy_may_suspend(phydev))
+ goto no_resume;
+
+ ret = phydrv->resume(phydev);
+ if (ret < 0)
+ return ret;
+
+no_resume:
+ if (phydev->attached_dev)
+ phy_start_machine(phydev, NULL);
+
+ return 0;
+}
+
+static int mdio_bus_restore(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!netdev)
return 0;
- return phydrv->resume(phydev);
+
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* The PHY needs to renegotiate. */
+ phydev->link = 0;
+ phydev->state = PHY_UP;
+
+ phy_start_machine(phydev, NULL);
+
+ return 0;
}
+static struct dev_pm_ops mdio_bus_pm_ops = {
+ .suspend = mdio_bus_suspend,
+ .resume = mdio_bus_resume,
+ .freeze = mdio_bus_suspend,
+ .thaw = mdio_bus_resume,
+ .restore = mdio_bus_restore,
+};
+
+#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
+
+#else
+
+#define MDIO_BUS_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
- .suspend = mdio_bus_suspend,
- .resume = mdio_bus_resume,
+ .pm = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);
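mdio_bus.c above moves from the legacy bus_type .suspend/.resume callbacks to a struct dev_pm_ops, which adds distinct freeze/thaw/restore steps for hibernation and lets the whole table compile out without CONFIG_PM; the suspend path also stops the PHY state machine first so a resuming netdev cannot deadlock on phydev->lock. A minimal sketch of the same wiring for a hypothetical bus, with the callback bodies elided:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_bus_suspend(struct device *dev) { return 0; }
static int foo_bus_resume(struct device *dev)  { return 0; }
static int foo_bus_restore(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_bus_pm_ops = {
	.suspend = foo_bus_suspend,
	.resume	 = foo_bus_resume,
	.freeze	 = foo_bus_suspend,	/* creating the hibernation image */
	.thaw	 = foo_bus_resume,	/* image written, devices come back */
	.restore = foo_bus_restore,	/* booted from the image: re-init hardware */
};
#define FOO_BUS_PM_OPS	(&foo_bus_pm_ops)
#else
#define FOO_BUS_PM_OPS	NULL
#endif

struct bus_type foo_bus_type = {
	.name	= "foo_bus",
	.pm	= FOO_BUS_PM_OPS,
};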
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c51721..64be4664ccab 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -410,7 +409,6 @@ EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
-static void phy_state_machine(struct work_struct *work);
/**
* phy_start_machine - start PHY state machine tracking
@@ -430,7 +428,6 @@ void phy_start_machine(struct phy_device *phydev,
{
phydev->adjust_state = handler;
- INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
schedule_delayed_work(&phydev->state_queue, HZ);
}
@@ -761,7 +758,7 @@ EXPORT_SYMBOL(phy_start);
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
*/
-static void phy_state_machine(struct work_struct *work)
+void phy_state_machine(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b10fedd82143..db1794546c56 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN;
mutex_init(&dev->lock);
+ INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
return dev;
}
@@ -276,6 +277,22 @@ int phy_device_register(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_register);
/**
+ * phy_find_first - finds the first PHY device on the bus
+ * @bus: the target MII bus
+ */
+struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ int addr;
+
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ if (bus->phy_map[addr])
+ return bus->phy_map[addr];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(phy_find_first);
+
+/**
* phy_prepare_link - prepares the PHY layer to monitor link status
* @phydev: target phy_device struct
* @handler: callback function for link status change notifications
@@ -378,6 +395,20 @@ void phy_disconnect(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_disconnect);
+int phy_init_hw(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->drv || !phydev->drv->config_init)
+ return 0;
+
+ ret = phy_scan_fixups(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phydev->drv->config_init(phydev);
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -425,21 +456,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
- if (phydev->drv->config_init) {
- int err;
-
- err = phy_scan_fixups(phydev);
-
- if (err < 0)
- return err;
-
- err = phydev->drv->config_init(phydev);
-
- if (err < 0)
- return err;
- }
-
- return 0;
+ return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
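phy_device.c above factors the "run board fixups, then the driver's config_init" sequence out of phy_attach_direct() into phy_init_hw(), so the new mdio_bus_restore() path can re-program a PHY whose registers were lost across hibernation; phy_find_first() is a small helper for MACs that just want whatever PHY sits on the bus. A hedged sketch of a caller re-initialising a PHY after power loss (names other than the phy_* API are hypothetical):

#include <linux/phy.h>

static int foo_phy_reinit(struct phy_device *phydev)
{
	int ret;

	/* re-apply fixups and config_init after the PHY lost its state */
	ret = phy_init_hw(phydev);
	if (ret < 0)
		return ret;

	/* force a renegotiation and restart the state machine */
	phydev->link = 0;
	phydev->state = PHY_UP;
	phy_start_machine(phydev, NULL);

	return 0;
}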
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 23062d067231..f6e190f73c32 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd7..ed2644a57500 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
static int smsc_phy_config_intr(struct phy_device *phydev)
{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for this SMSC Transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
return smsc_phy_ack_interrupt (phydev);
}
+static int lan911x_config_init(struct phy_device *phydev)
+{
+ return smsc_phy_ack_interrupt(phydev);
+}
static struct phy_driver lan83c185_driver = {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan911x_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 3327e9fc7b51..9a2103a69e17 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -94,6 +94,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6a375ea4947d..6c2e8fa0ca31 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/string.h>
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edbe..6e281bc825e5 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
@@ -167,7 +168,7 @@ struct channel {
u8 avail; /* flag used in multilink stuff */
u8 had_frag; /* >= 1 fragments have been sent */
u32 lastseq; /* MP: last sequence # received */
- int speed; /* speed of the corresponding ppp channel*/
+ int speed; /* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
@@ -1293,13 +1294,13 @@ ppp_push(struct ppp *ppp)
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
- int len, totlen;
- int i, bits, hdrlen, mtu;
- int flen;
- int navail, nfree, nzero;
- int nbigger;
- int totspeed;
- int totfree;
+ int len, totlen;
+ int i, bits, hdrlen, mtu;
+ int flen;
+ int navail, nfree, nzero;
+ int nbigger;
+ int totspeed;
+ int totfree;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
@@ -1307,21 +1308,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
struct ppp_channel *chan;
totspeed = 0; /*total bitrate of the bundle*/
- nfree = 0; /* # channels which have no packet already queued */
- navail = 0; /* total # of usable channels (not deregistered) */
- nzero = 0; /* number of channels with zero speed associated*/
- totfree = 0; /*total # of channels available and
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
+ nzero = 0; /* number of channels with zero speed associated*/
+ totfree = 0; /*total # of channels available and
*having no queued packets before
*starting the fragmentation*/
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- i = 0;
- list_for_each_entry(pch, &ppp->channels, clist) {
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
navail += pch->avail = (pch->chan != NULL);
pch->speed = pch->chan->speed;
- if (pch->avail) {
+ if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
- !pch->had_frag) {
+ !pch->had_frag) {
if (pch->speed == 0)
nzero++;
else
@@ -1331,60 +1332,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
++nfree;
++totfree;
}
- if (!pch->had_frag && i < ppp->nxchan)
- ppp->nxchan = i;
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
}
++i;
}
/*
- * Don't start sending this packet unless at least half of
- * the channels are free. This gives much better TCP
- * performance if we have a lot of channels.
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
*/
- if (nfree == 0 || nfree < navail / 2)
- return 0; /* can't take now, leave it in xmit_pending */
+ if (nfree == 0 || nfree < navail / 2)
+ return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
- p = skb->data;
- len = skb->len;
+ p = skb->data;
+ len = skb->len;
if (*p == 0) {
++p;
--len;
}
totlen = len;
- nbigger = len % nfree;
+ nbigger = len % nfree;
- /* skip to the channel after the one we last used
- and start at that one */
+ /* skip to the channel after the one we last used
+ and start at that one */
list = &ppp->channels;
- for (i = 0; i < ppp->nxchan; ++i) {
+ for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
break;
}
}
- /* create a fragment for each channel */
+ /* create a fragment for each channel */
bits = B;
- while (len > 0) {
+ while (len > 0) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
continue;
}
- pch = list_entry(list, struct channel, clist);
+ pch = list_entry(list, struct channel, clist);
++i;
if (!pch->avail)
continue;
/*
- * Skip this channel if it has a fragment pending already and
- * we haven't given a fragment to all of the free channels.
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
- if (nfree > 0)
+ if (nfree > 0)
continue;
} else {
pch->avail = 1;
@@ -1393,32 +1394,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
if (pch->chan == NULL) {
- /* can't use this channel, it's being deregistered */
+ /* can't use this channel, it's being deregistered */
if (pch->speed == 0)
nzero--;
else
- totspeed -= pch->speed;
+ totspeed -= pch->speed;
spin_unlock_bh(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
nfree--;
- if (--navail == 0)
+ if (--navail == 0)
break;
continue;
}
/*
*if the channel speed is not set divide
- *the packet evenly among the free channels;
+ *the packet evenly among the free channels;
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
flen = len;
if (nfree > 0) {
if (pch->speed == 0) {
- flen = totlen/nfree ;
+ flen = totlen/nfree;
if (nbigger > 0) {
flen++;
nbigger--;
@@ -1436,8 +1437,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
}
/*
- *check if we are on the last channel or
- *we exceded the lenght of the data to
+ *check if we are on the last channel or
+ *we exceded the lenght of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1449,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
*above formula will be equal or less than zero.
*Skip the channel in this case
*/
- if (flen <= 0) {
+ if (flen <= 0) {
pch->avail = 2;
spin_unlock_bh(&pch->downl);
continue;
}
- mtu = pch->chan->mtu - hdrlen;
- if (mtu < 4)
- mtu = 4;
+ mtu = pch->chan->mtu - hdrlen;
+ if (mtu < 4)
+ mtu = 4;
if (flen > mtu)
flen = mtu;
- if (flen == len)
- bits |= E;
- frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (flen == len)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (!frag)
goto noskb;
- q = skb_put(frag, flen + hdrlen);
+ q = skb_put(frag, flen + hdrlen);
- /* make the MP header */
+ /* make the MP header */
q[0] = PPP_MP >> 8;
q[1] = PPP_MP;
if (ppp->flags & SC_MP_XSHORTSEQ) {
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
} else {
q[2] = bits;
@@ -1483,24 +1484,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* try to send it down the channel */
chan = pch->chan;
- if (!skb_queue_empty(&pch->file.xq) ||
+ if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
- pch->had_frag = 1;
+ pch->had_frag = 1;
p += flen;
- len -= flen;
+ len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock_bh(&pch->downl);
}
- ppp->nxchan = i;
+ ppp->nxchan = i;
return 1;
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 3a13cecae3e2..52938da1e542 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#define PPP_VERSION "2.4.2"
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 9fbb2eba9a06..449a9825200d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -756,6 +756,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
/* Try to dequeue as many skbs from reorder_q as we can. */
pppol2tp_recv_dequeue(session);
+ sock_put(sock);
return 0;
@@ -772,6 +773,7 @@ discard_bad_csum:
UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
tunnel->stats.rx_errors++;
kfree_skb(skb);
+ sock_put(sock);
return 0;
@@ -1180,7 +1182,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
/* Calculate UDP checksum if configured to do so */
if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
+ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
skb->ip_summed = CHECKSUM_COMPLETE;
csum = skb_checksum(skb, 0, udp_len, 0);
uh->check = csum_tcpudp_magic(inet->inet_saddr,
@@ -1661,6 +1664,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (tunnel_sock == NULL)
goto end;
+ sock_hold(tunnel_sock);
tunnel = tunnel_sock->sk_user_data;
} else {
tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
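The pppol2tp hunks above pair references taken on the tunnel socket with releases on every exit from the receive path, and take a hold at connect time, so the refcount stays balanced on the discard paths as well as the success path. A sketch of that pattern only, with the protocol work left abstract and the foo_* name hypothetical:

#include <linux/skbuff.h>
#include <net/sock.h>

static int foo_recv(struct sock *tunnel_sk, struct sk_buff *skb)
{
	sock_hold(tunnel_sk);		/* keep the socket alive while we use it */

	if (!pskb_may_pull(skb, 2))
		goto discard;

	/* ... normal receive processing ... */

	sock_put(tunnel_sk);
	return 0;

discard:
	kfree_skb(skb);
	sock_put(tunnel_sk);		/* every exit path drops the reference */
	return 0;
}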
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index ac806b27c658..d4191ef9cad1 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 0c768593aad0..5bf229bb34c2 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -568,7 +569,7 @@ void gelic_net_set_multi(struct net_device *netdev)
status);
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
status = lv1_net_add_multicast_address(bus_id(card),
dev_id(card),
0, 1);
@@ -580,7 +581,7 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- for (mc = netdev->mc_list; mc; mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
addr = 0;
p = mc->dmi_addr;
for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 227b141c4fbd..f0be507e5324 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1389,113 +1390,6 @@ static int gelic_wl_get_mode(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-/* SIOCIWFIRSTPRIV */
-static int hex2bin(u8 *str, u8 *bin, unsigned int len)
-{
- unsigned int i;
- static unsigned char *hex = "0123456789ABCDEF";
- unsigned char *p, *q;
- u8 tmp;
-
- if (len != WPA_PSK_LEN * 2)
- return -EINVAL;
-
- for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
- p = strchr(hex, toupper(str[i]));
- q = strchr(hex, toupper(str[i + 1]));
- if (!p || !q) {
- pr_info("%s: unconvertible PSK digit=%d\n",
- __func__, i);
- return -EINVAL;
- }
- tmp = ((p - hex) << 4) + (q - hex);
- *bin++ = tmp;
- }
- return 0;
-};
-
-static int gelic_wl_priv_set_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- unsigned int len;
- unsigned long irqflag;
- int ret = 0;
-
- pr_debug("%s:<- len=%d\n", __func__, data->data.length);
- len = data->data.length - 1;
- if (len <= 2)
- return -EINVAL;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- if (extra[0] == '"' && extra[len - 1] == '"') {
- pr_debug("%s: passphrase mode\n", __func__);
- /* pass phrase */
- if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
- pr_info("%s: passphrase too long\n", __func__);
- ret = -E2BIG;
- goto out;
- }
- memset(wl->psk, 0, sizeof(wl->psk));
- wl->psk_len = len - 2;
- memcpy(wl->psk, &(extra[1]), wl->psk_len);
- wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
- } else {
- ret = hex2bin(extra, wl->psk, len);
- if (ret)
- goto out;
- wl->psk_len = WPA_PSK_LEN;
- wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
- }
- set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
-out:
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:->\n", __func__);
- return ret;
-}
-
-static int gelic_wl_priv_get_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- char *p;
- unsigned long irqflag;
- unsigned int i;
-
- pr_debug("%s:<-\n", __func__);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- p = extra;
- if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
- if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
- for (i = 0; i < wl->psk_len; i++) {
- sprintf(p, "%02xu", wl->psk[i]);
- p += 2;
- }
- *p = '\0';
- data->data.length = wl->psk_len * 2;
- } else {
- *p++ = '"';
- memcpy(p, wl->psk, wl->psk_len);
- p += wl->psk_len;
- *p++ = '"';
- *p = '\0';
- data->data.length = wl->psk_len + 2;
- }
- } else
- /* no psk set */
- data->data.length = 0;
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:-> %d\n", __func__, data->data.length);
- return 0;
-}
-#endif
-
/* SIOCGIWNICKN */
static int gelic_wl_get_nick(struct net_device *net_dev,
struct iw_request_info *info,
@@ -1571,8 +1465,10 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
init_completion(&wl->scan_done);
/*
* If we have already a bss list, don't try to get new
+ * unless we are doing an ESSID scan
*/
- if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
+ if ((!essid_len && !always_scan)
+ && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
pr_debug("%s: already has the list\n", __func__);
complete(&wl->scan_done);
goto out;
@@ -1673,7 +1569,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
}
}
- /* put them in the newtork_list */
+ /* put them in the network_list */
for (i = 0, scan_info_size = 0, scan_info = buf;
scan_info_size < data_len;
i++, scan_info_size += be16_to_cpu(scan_info->size),
@@ -2009,7 +1905,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
/* PSK type */
wpa->psk_type = cpu_to_be16(wl->psk_type);
#ifdef DEBUG
- pr_debug("%s: sec=%s psktype=%s\nn", __func__,
+ pr_debug("%s: sec=%s psktype=%s\n", __func__,
wpasecstr(wpa->security),
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
"BIN" : "passphrase");
@@ -2019,9 +1915,9 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
* the debug log because this dumps your precious
* passphrase/key.
*/
- pr_debug("%s: psk=%s\n",
+ pr_debug("%s: psk=%s\n", __func__,
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
- (char *)"N/A" : (char *)wpa->psk);
+ "N/A" : wpa->psk);
#endif
#endif
/* issue wpa setup */
@@ -2406,40 +2302,10 @@ static const iw_handler gelic_wl_wext_handler[] =
IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
};
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-static struct iw_priv_args gelic_wl_private_args[] =
-{
- {
- .cmd = GELIC_WL_PRIV_SET_PSK,
- .set_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "set_psk"
- },
- {
- .cmd = GELIC_WL_PRIV_GET_PSK,
- .get_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "get_psk"
- }
-};
-
-static const iw_handler gelic_wl_private_handler[] =
-{
- gelic_wl_priv_set_psk,
- gelic_wl_priv_get_psk,
-};
-#endif
-
static const struct iw_handler_def gelic_wl_wext_handler_def = {
.num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
.standard = gelic_wl_wext_handler,
.get_wireless_stats = gelic_wl_get_wireless_stats,
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
- .num_private = ARRAY_SIZE(gelic_wl_private_handler),
- .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
- .private = gelic_wl_private_handler,
- .private_args = gelic_wl_private_args,
-#endif
};
static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8d..4ef0afbcbe1b 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
-static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
/* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
struct ql3_adapter *qdev = netdev_priv(ndev);
unregister_netdev(ndev);
- qdev = netdev_priv(ndev);
ql_disable_interrupts(qdev);
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..0da94b208db1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1131 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+
+#include "qlcnic_hdr.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 0
+#define QLCNIC_LINUX_VERSIONID "5.0.0"
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
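+/*
+ * Illustrative example (hypothetical value, added for clarity): an image
+ * word v = 0x000A0305 decodes as major = 0x05, minor = 0x03, build = 0x000A,
+ * so QLCNIC_DECODE_VERSION(v) evaluates to
+ * QLCNIC_VERSION_CODE(5, 3, 10) = (5 << 24) + (3 << 16) + 10 = 0x0503000A.
+ */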
+
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3_MAX_MTU (9600)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* Tx defines */
+#define MAX_BUFFERS_PER_CMD 32
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
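+/*
+ * Note (illustrative, hypothetical values): the mask-based wrap-around above
+ * only works when 'length' is a power of two, which the ring sizes used by
+ * this driver are expected to satisfy. For example,
+ * get_next_index(1023, 1024) == 0 and get_next_index(5, 1024) == 6.
+ */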
+
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them only when doing LSO (packets above the standard 1500-byte size).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
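+/*
+ * Illustrative example (hypothetical values): for a TX_TCP_LSO (0x05)
+ * descriptor with FLAGS_VLAN_OOB (0x40) set, qlcnic_set_tx_flags_opcode()
+ * stores 0x40 | (0x05 << 7) = 0x2c0 in flags_opcode; a 1514-byte frame
+ * split over 2 fragments makes qlcnic_set_tx_frags_len() store
+ * 2 | (1514 << 8) = 0x5ea02 in nfrags__length.
+ */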
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
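+/*
+ * Illustrative example (hypothetical descriptor value): for
+ * sts_data = 0x10000000005ea421 the accessors above yield port = 1,
+ * status = 2 (STATUS_CKSUM_OK), type = 4, total length = 1514 and
+ * opcode = 0x04 (QLCNIC_RXPKT_DESC).
+ */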
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ u32 findex;
+ u32 num_entries;
+ u32 entry_size;
+ u32 reserved[5];
+};
+
+struct uni_data_desc{
+ u32 findex;
+ u32 size;
+ u32 reserved[5];
+};
+
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the DMA mapping info for each SG list entry.
+ * This has to be freed when DMA is complete. It is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct qlcnic_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These are used
+ * to save the dma info needed for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct qlcnic_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+ u64 lrobytes;
+ u64 lso_frames;
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 skb_alloc_failure;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor ring. There may
+ * be one ring for normal packets, one for jumbo, and possibly others.
+ */
+struct qlcnic_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct qlcnic_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information relevant to the receive
+ * path must be kept in this structure. Global data may be present
+ * elsewhere.
+ */
+struct qlcnic_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
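+/*
+ * Illustrative example (hypothetical values): for PCI function 1 and
+ * version QLCHAL_VERSION (1), QLCNIC_CDRP_SIGNATURE_MAKE(1, 1) evaluates
+ * to 0x01 | (0x01 << 8) | (0xcafe << 16) = 0xcafe0101.
+ */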
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define QLCNIC_CDRP_CMD_MAX 0x0000001f
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+
+/*
+ * Context state
+ */
+#define QLCHAL_VERSION 1
+
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+};
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+};
+
+struct qlcnic_cardrsp_rds_ring{
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+};
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrsp_sds_rings */
+ char data[0];
+};
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
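+/*
+ * Illustrative example (hypothetical ring counts): with 3 RDS rings and
+ * 4 SDS rings, SIZEOF_HOSTRQ_RX() is the fixed request header plus
+ * 3 * sizeof(struct qlcnic_hostrq_rds_ring) +
+ * 4 * sizeof(struct qlcnic_hostrq_sds_ring), matching the layout packed
+ * into data[] by the rx context creation code.
+ */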
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+};
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+};
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+
+struct qlcnic_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and
+ * are adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define QLCNIC_INTR_DEFAULT 0x04
+
+union qlcnic_nic_intr_coalesce_data {
+ struct {
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ } data;
+ u64 word;
+};
+
+struct qlcnic_nic_intr_coalesce {
+ u16 stats_time_us;
+ u16 rate_sample_time;
+ u16 flags;
+ u16 rsvd_1;
+ u32 low_threshold;
+ u32 high_threshold;
+ union qlcnic_nic_intr_coalesce_data normal;
+ union qlcnic_nic_intr_coalesce_data low;
+ union qlcnic_nic_intr_coalesce_data high;
+ union qlcnic_nic_intr_coalesce_data irq;
+};
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_START 0
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
+#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
+#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
+#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
+#define QLCNIC_C2C_OPCODE 22
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define QLCNIC_H2C_OPCODE_LAST 25
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_START 128
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
+#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
+#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0x01
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
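+/*
+ * Illustrative example (hypothetical header value): if bits 39:32 of
+ * msg_hdr hold 141 (QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE),
+ * qlcnic_get_nic_msg_opcode() returns 0x8d = 141.
+ */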
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+};
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_BRIDGE_ENABLED 0x10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 rx_csum;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 rsrvd1;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 dev_state;
+ u8 diag_test;
+ u8 diag_cnt;
+ u8 rsrd1;
+ u16 rsrd2;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct qlcnic_adapter_stats stats;
+
+ struct qlcnic_recv_context recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ struct qlcnic_nic_intr_coalesce coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /*File fw product offset*/
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
+int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define QLCRD32(adapter, off) \
+ (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+ (qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a) \
+ qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
+int qlcnic_check_loopback_buff(unsigned char *data);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_brdinfo {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ if (tx_ring->producer < tx_ring->sw_consumer)
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
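+/*
+ * Illustrative example (hypothetical ring state): with num_desc = 1024,
+ * producer = 10 and sw_consumer = 5, qlcnic_tx_avail() returns
+ * 5 + 1024 - 10 = 1019 free descriptors.
+ */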
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+#endif /* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..0a6a39914aec
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0;
+
+ do {
+ /* give at least 1 ms for the firmware to respond */
+ msleep(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature;
+ u32 rcode = QLCNIC_RCODE_SUCCESS;
+ struct pci_dev *pdev = adapter->pdev;
+
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter))
+ return QLCNIC_RCODE_TIMEOUT;
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "card response timeout.\n");
+ rcode = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ rcode);
+ }
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+
+ return rcode;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ QLCNIC_CDRP_CMD_SET_MTU)) {
+
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+ int err;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+ }
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err;
+ u64 phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(temp - 0x200));
+
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create tx ctx in firmware%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ adapter->tx_context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ }
+}
+
+int
+qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
+{
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_READ_PHY)) {
+
+ return -EIO;
+ }
+
+ return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+}
+
+int
+qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+{
+ return qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ val,
+ 0,
+ QLCNIC_CDRP_CMD_WRITE_PHY);
+}
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err;
+ int ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr);
+ if (tx_ring->hw_consumer == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ *(tx_ring->hw_consumer) = 0;
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate rds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = (struct rcv_desc *)addr;
+
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate sds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = (struct status_desc *)addr;
+ }
+
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+ }
+
+ recv_ctx = &adapter->recv_ctx;
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->hw_consumer != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..f83e15fe3e1b
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_called",
+ QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished",
+ QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped",
+ QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped",
+ QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed",
+ QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts",
+ QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts",
+ QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes",
+ QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes",
+ QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"lrobytes",
+ QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames",
+ QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"xmit_on",
+ QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off",
+ QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+ QLC_OFF(stats.skb_alloc_failure)},
+
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Loopback_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ CRB_CMDPEG_STATE,
+ CRB_RCVPEG_STATE,
+ CRB_XG_STATE_P3,
+ CRB_FW_CAPABILITIES_1,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u16 pcifn = adapter->ahw.pci_func;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ecmd->speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
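+ /* fall through */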
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
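+ /* fall through */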
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ __u32 status;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ /* autonegotiation */
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ ecmd->autoneg) != 0)
+ return -EIO;
+ else
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (qlcnic_fw_cmd_query_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+
+ switch (ecmd->speed) {
+ case SPEED_10:
+ qlcnic_set_phy_speed(status, 0);
+ break;
+ case SPEED_100:
+ qlcnic_set_phy_speed(status, 1);
+ break;
+ case SPEED_1000:
+ qlcnic_set_phy_speed(status, 2);
+ break;
+ }
+
+ if (ecmd->duplex == DUPLEX_HALF)
+ qlcnic_clear_phy_duplex(status);
+ if (ecmd->duplex == DUPLEX_FULL)
+ qlcnic_set_phy_duplex(status);
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ *((int *)&status)) != 0)
+ return -EIO;
+ else {
+ adapter->link_speed = ecmd->speed;
+ adapter->link_duplex = ecmd->duplex;
+ }
+ } else
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
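+ /* version word: 0x01 in the top byte, chip revision, then PCI device id */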
+
+ for (i = 0; diag_registers[i] != -1; i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+ regs_buff[i++] = 1; /* No. of tx ring */
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+ regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = 2; /* No. of rx ring */
+ regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 val;
+
+ val = QLCRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
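+/*
+ * Clamp a requested descriptor count into [min, max] and round it up to a
+ * power of two, e.g. a request for 300 rx descriptors becomes 512.
+ */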
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+ max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ /* read mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return QLCNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define QLC_ILB_PKT_SIZE 64
+
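+/*
+ * Build a 64 byte diagnostic frame: 12 bytes of 0xff (broadcast dst/src
+ * addresses), a 4 byte marker and 0x4e filler for the rest of the frame.
+ */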
+static void qlcnic_create_loopback_buff(unsigned char *data)
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+ memset(data, 0x4e, QLC_ILB_PKT_SIZE);
+ memset(data, 0xff, 12);
+ memcpy(data + 12, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data)
+{
+ unsigned char buff[QLC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff);
+ return memcmp(data, buff, QLC_ILB_PKT_SIZE);
+}
+
+static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
+ if (!skb)
+ return -ENOMEM;
+ qlcnic_create_loopback_buff(skb->data);
+ skb_put(skb, QLC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ msleep(5);
+
+ qlcnic_process_rcv_ring_diag(sds_ring);
+
+ dev_kfree_skb_any(skb);
+ if (!adapter->diag_cnt)
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ ret = qlcnic_set_ilb_mode(adapter);
+ if (ret)
+ goto done;
+
+ ret = qlcnic_do_ilb_test(adapter);
+
+ qlcnic_clear_ilb_mode(adapter);
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_it;
+
+ adapter->diag_cnt = 0;
+ ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
+ QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+ if (ret)
+ goto done;
+
+ msleep(10);
+
+ ret = !adapter->diag_cnt;
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ }
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* link test */
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ qlcnic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (qlcnic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static u32 qlcnic_get_tx_csum(struct net_device *dev)
+{
+ return dev->features & NETIF_F_IP_CSUM;
+}
+
+static u32 qlcnic_get_rx_csum(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ return adapter->rx_csum;
+}
+
+static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ adapter->rx_csum = !!data;
+ return 0;
+}
+
+static u32 qlcnic_get_tso(struct net_device *dev)
+{
+ return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+}
+
+static int qlcnic_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+static int qlcnic_blink_led(struct net_device *dev, u32 val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ ret = qlcnic_config_led(adapter, 1, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ return ret;
+ }
+
+ msleep_interruptible(val * 1000);
+
+ ret = qlcnic_config_led(adapter, 0, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ return 0;
+}
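+/*
+ * The usecs/frames pairs above correspond to the standard ethtool -C
+ * parameters (rx-usecs, rx-frames, tx-usecs, tx-frames).
+ */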
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int hw_lro;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
+
+ ethtool_op_set_flags(netdev, data);
+
+ hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .get_tx_csum = qlcnic_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = qlcnic_get_tso,
+ .set_tso = qlcnic_set_tso,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_rx_csum = qlcnic_get_rx_csum,
+ .set_rx_csum = qlcnic_set_rx_csum,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = qlcnic_set_flags,
+ .phys_id = qlcnic_blink_led,
+};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..0469f84360a4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
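+/* Hub address in bits [11:7], agent address in bits [6:0], */
+/* e.g. QLCNIC_HW_CRB_HUB_AGT_ADR_MN = (0x05 << 7) | 0x15 = 0x295 */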
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
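+/* e.g. QLCNIC_CRB_NIU = 0x06000000 + 6 * 0x00100000 = 0x06600000 */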
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MN_2M (0)
+#define QLCNIC_PCI_MS_2M (0x80000)
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
+#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
+#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
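+/* e.g. MIU_TEST_AGT_RDDATA(2) = 0x28 and MIU_TEST_AGT_RDDATA(3) = 0x2c, */
+/* i.e. words 2 and 3 of a read land in the _UPPER_ data registers */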
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
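+/* e.g. a CRB_XG_STATE_P3 value of 0x21 decodes to XG_LINK_UP_P3 for */
+/* function 0 and XG_LINK_DOWN_P3 for function 1 */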
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
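+/* e.g. function 5 reads CRB_PF_LINK_SPEED_2 and takes bits [15:8]; a byte */
+/* value of 100 gives 100 * P3_LINK_SPEED_MHZ = 10000 Mbps */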
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
+
+#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
+#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
+
+#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0 (QLCNIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
+#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
+#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
+#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
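+/* e.g. qlcnic_encode_temp(75, QLCNIC_TEMP_NORMAL) = 0x004b0001 */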
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
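+/* e.g. PCIX_MSI_F(2) = 0x13008 */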
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
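+/* e.g. PCIE_SEM_LOCK(2) = 0x1c010, PCIE_SEM_UNLOCK(2) = 0x1c014 */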
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
+
+ /* Device State */
+#define QLCNIC_DEV_COLD 1
+#define QLCNIC_DEV_INITALIZING 2
+#define QLCNIC_DEV_READY 3
+#define QLCNIC_DEV_NEED_RESET 4
+#define QLCNIC_DEV_NEED_QUISCENT 5
+#define QLCNIC_DEV_FAILED 6
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
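+/* Bits 0, 2, 4 and 6 mask (disable) tx pause frames on GB0..GB3 respectively */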
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
+
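A minimal usage sketch (not part of this patch) of the PHY status accessors defined above; the decoding function and the raw `status` value are hypothetical, and `_qlcnic_crb_get_bit()` is assumed to be the single-bit helper declared earlier in this header:

static void example_decode_phy_status(u32 status)
{
	/* speed encoding from bits 14-15 of PHY register 17; index 3 is reserved */
	static const int speed_mbps[4] = { 10, 100, 1000, 0 };

	int link   = qlcnic_get_phy_link(status);	/* bit 10 */
	int duplex = qlcnic_get_phy_duplex(status);	/* bit 13 */
	int speed  = qlcnic_get_phy_speed(status);	/* bits 14-15 */

	pr_info("phy: link %s, %d Mb/s, %s duplex\n",
		link ? "up" : "down", speed_mbps[speed],
		duplex ? "full" : "half");
}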
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..e73ba455aa20
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1260 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+#include <linux/slab.h>
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+
+static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static const struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ __netif_tx_unlock_bh(tx_ring->txq);
+ adapter->stats.xmit_off++;
+ return -EBUSY;
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
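The descriptor copy above relies on the usual single-producer ring convention: get_next_index() advances and wraps the producer, and the send is refused when fewer than nr_desc slots are free. A generic sketch of that arithmetic, assuming num_desc is a power of two (the driver's own qlcnic_tx_avail()/get_next_index() may differ in detail):

static inline u32 example_next_index(u32 index, u32 num_desc)
{
	return (index + 1) & (num_desc - 1);		/* wrap at ring size */
}

static inline u32 example_ring_space(u32 producer, u32 consumer, u32 num_desc)
{
	/* keep one slot empty so a full ring is distinguishable from an empty one */
	return (consumer + num_desc - producer - 1) & (num_desc - 1);
}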
+
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ unsigned op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ /* check if the address already exists */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ return 0;
+ }
+
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ if (cur == NULL) {
+ dev_err(&adapter->netdev->dev,
+ "failed to add mac address filter\n");
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+
+ return qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dev_mc_list *mc_ptr;
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr);
+ qlcnic_nic_add_mac(adapter, bcast_addr);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
+ qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
+ }
+ }
+
+send_fw_cmd:
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ adapter->flags ^= QLCNIC_LRO_ENABLED;
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
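The request-word layout documented inside qlcnic_config_rss() can also be expressed as a small packing helper; this is only a sketch that mirrors the shifts used above (the helper name and its kernel-style integer types are assumptions):

static inline u64 example_pack_rss_word(u8 hash_ipv4, u8 hash_ipv6,
					bool enable, u16 ind_tbl_mask)
{
	return ((u64)(hash_ipv4 & 0x3) << 4) |		/* bits 5-4   */
	       ((u64)(hash_ipv6 & 0x3) << 6) |		/* bits 7-6   */
	       ((u64)(enable ? 1 : 0) << 8) |		/* bit 8      */
	       ((u64)ind_tbl_mask << 48);		/* bits 63-48 */
}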
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x reuqest\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu > P3_MAX_MTU) {
+ dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
+ P3_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+{
+ u32 crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = QLCRD32(adapter, crbaddr);
+ mac_hi = QLCRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
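A brief reading of the layout implied above: each PCI-function pair shares three consecutive 32-bit CRB words (hence the (pci_func / 2) * 3 stride), with the even function's 48-bit MAC in bits 0-47 of the trio and the odd function's in bits 48-95. A hedged unpacking sketch, with hypothetical word names:

static inline u64 example_unpack_pair_mac(u32 w0, u32 w1, u32 w2, int odd)
{
	if (odd)
		return ((u64)w1 >> 16) | ((u64)w2 << 16);	/* bits 48-95 */
	return (u64)w0 | ((u64)w1 << 32);			/* bits 0-47 used */
}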
+
+/*
+ * qlcnic_pci_get_crb_addr_2M - translate a CRB offset into a 2M-map address.
+ *
+ * In: 'off' is an offset from the base of the 128M pci map.
+ * Returns < 0 if 'off' is not a valid CRB offset;
+ *         1 if CRB window access is needed, with '*addr' set to the
+ *           indirect window address for 'off';
+ *         0 if no window access is needed, with '*addr' set to the
+ *           direct 2M-map address.
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
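A short worked example of the direct-map branch, using entries from the crb_128M_2M_map table above (the chosen offset is arbitrary):

/*
 * After the QLCNIC_PCI_CRBSPACE adjustment, off = 0x0600120 selects
 * CRB_BLK = 6 (NIU) and CRB_SUBBLK = 0, whose map entry is
 * {1, 0x0600000, 0x0610000, 0x1c0000}.  The offset is therefore
 * remapped to pci_base0 + 0x1c0000 + (0x0600120 - 0x0600000)
 * = pci_base0 + 0x1c0120, with no CRB window change required.
 * Offsets without a valid sub-block entry fall back to the indirect
 * window at CRB_INDIRECT_2M and the function returns 1.
 */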
+
+/*
+ * qlcnic_pci_set_crbwindow_2M - select the CRB window that covers 'off'.
+ *
+ * In: 'off' is an offset from CRB space in the 128M pci map.
+ * Side effect: reprograms the 2M-map CRB window register; callers are
+ * expected to hold the CRB window lock (crb_win_lock) around this call.
+ */
+static void
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+ adapter->ahw.crb_win = window;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+
+ return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if ((addr & 0x00ff800) == 0xff800) {
+ if (printk_ratelimit())
+ dev_warn(&pdev->dev, "QM access not handled\n");
+ return -EIO;
+ }
+
+ window = OCM_WIN_P3P(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MAX_CTL_CHECK 1000
+
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 data)
+{
+ int i, j, ret;
+ u32 temp, off8;
+ u64 stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ i = 0;
+ if (stride == 16) {
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+ }
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
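On parts with a 16-byte test-agent stride, the agent writes a whole 16-byte line, so the code above first issues an agent read and copies the untouched 8-byte half from the RDDATA registers into WRDATA before placing the caller's value. A compressed sketch of the slot selection only (names are hypothetical):

static inline int example_new_data_slot(u64 off, u64 stride)
{
	if (stride == 16)
		return (off & 0xf) ? 2 : 0;	/* 2,3 = upper half; 0,1 = lower half */
	return 0;				/* 8-byte stride: always slots 0,1 */
}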
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val, stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+ return qlcnic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ off8 = MIU_TEST_AGT_RDDATA_LO;
+ if ((stride == 16) && (off & 0xf))
+ off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+ temp = readl(mem_crb + off8 + 4);
+ val = (u64)temp << 32;
+ val |= readl(mem_crb + off8);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw.board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev,
+ "%sting loopback mode failed.\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_set_fw_loopback(adapter, 1))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..9d2c124048fa
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1678 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ size = sizeof(struct qlcnic_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ return -ENOMEM;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
+ vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ dev_err(&netdev->dev, "Failed to allocate "
+ "rx buffer ring %d\n", ring);
+ goto err_out;
+ }
+ memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = QLCNIC_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
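A worked example of the decode, derived from the crb_addr_transform() macro above (the NIU block is picked arbitrarily):

/*
 * For a ROM entry whose top 12 bits equal
 * QLCNIC_HW_CRB_HUB_AGT_ADR_NIU << 20, the loop matches
 * crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_NIU], so the function returns
 * (QLCNIC_HW_PX_MAP_CRB_NIU << 20) + (addr & 0x000fffff): the same
 * low 20-bit offset, relocated into the PCI CRB block for the NIU.
 */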
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ int addr, int *valp)
+{
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* resetall */
+ qlcnic_rom_lock(adapter);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qlcnic_rom_unlock(adapter);
+
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* p2dn replyCount */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 & 1*/
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
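For reference, the flash layout consumed above can be summarized as follows (reconstructed from the reads performed in qlcnic_pinit_from_rom(); offsets are in bytes):

/*
 * ROM crb_init area:
 *   word at 0x0 : 0xcafecafe magic
 *   word at 0x4 : (entry_count << 16) | entry_offset, where entry_offset
 *                 is expressed in 32-bit words
 *   entries     : entry_count pairs of { data, crb_addr }, 8 bytes per
 *                 pair, starting at byte offset 4 * entry_offset
 */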
+
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define FILEHEADER_SIZE (14 * 4)
+
+static int
+qlcnic_validate_header(struct qlcnic_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 fw_file_size = adapter->fw->size;
+ __le32 entries;
+ __le32 entry_size;
+ __le32 tab_size;
+
+ if (fw_file_size < FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_BOOTLD_IDX_OFF));
+ __le32 offs;
+ __le32 tab_size;
+ __le32 data_size;
+
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = descr->findex + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_fw(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_FIRMWARE_IDX_OFF));
+ __le32 offs;
+ __le32 tab_size;
+ __le32 data_size;
+
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = descr->findex + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = qlcnic_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ __le32 tab_size;
+ u32 i;
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (!ptab_descr)
+ return -EINVAL;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ u32 flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -EINVAL;
+}
+
+static int
+qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(
+ *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + QLCNIC_UNI_BIOS_VERSION_OFF));
+
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
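+ /* poll the peg alive counter for up to 2 seconds (10 x 200 ms) */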
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have a newer or different firmware file */
+ if (adapter->fw) {
+
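+ /* request a reset only when the firmware file is newer than what is running */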
+ val = qlcnic_get_fw_version(adapter);
+
+ version = QLCNIC_DECODE_VERSION(val);
+
+ major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (version > QLCNIC_VERSION_CODE(major, minor, build))
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (fw) {
+ __le64 data;
+
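+ /* copy the bootloader, then the firmware, into adapter memory 8 bytes at a time */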
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ __le32 val;
+ u32 ver, min_ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_validate_unified_romimage(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+
+ min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
+
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer */
+ if (qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&val))
+ return -EIO;
+
+ val = QLCNIC_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
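+ /* try the unified ROM image first, then fall back to the flashed image */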
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 60;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 2000;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
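+ /* body[1]: cable OUI, cable length, link speed; body[2]: link status, module, duplex, autoneg */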
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
+ "length %d\n", cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!buffer->skb) {
+ adapter->stats.skb_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ skb = buffer->skb;
+
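+ /* standard 2-byte pad so the IP header ends up 16-byte aligned (skipped in cut-through mode) */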
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = QLCNIC_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = QLCNIC_BUFFER_FREE;
+ return skb;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+
+ return buffer;
+}
+
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+
+ int count = 0;
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
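+ /* fall through: response descriptors carry no rx buffer to reclaim */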
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
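+ /* hand the consumed status descriptors back to firmware */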
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct qlcnic_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct qlcnic_rx_buffer, list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ spin_lock(&rds_ring->lock);
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* build a receive descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ spin_unlock(&rds_ring->lock);
+
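+ /* tell hardware about the last descriptor just posted */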
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* build a receive descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ skb_put(skb, rds_ring->skb_size);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+
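+ /* diag path: validate the loopback frame, then drop it without passing it up the stack */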
+ if (!qlcnic_check_loopback_buff(skb->data))
+ adapter->diag_cnt++;
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0;
+
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
+ ring, sts_data0);
+
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..234dab1f9982
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2726 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+
+MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
+ QLCNIC_LINUX_VERSIONID;
+
+static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0644);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout_task(struct work_struct *work);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
+static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+
+void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+
+ if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+ netif_stop_queue(adapter->netdev);
+ smp_mb();
+ adapter->stats.xmit_off++;
+ }
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return (recv_ctx->sds_rings == NULL);
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
+static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u64 mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
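+ /* start with a 39-bit mask; qlcnic_update_dma_mask() may widen it once firmware is up */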
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
+{
+ int change, shift, err;
+ u64 mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = QLCRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (shift > 9)
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
+ (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
+ data = QLCNIC_PORT_MODE_802_3_AP;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_XG) {
+ data = QLCNIC_PORT_MODE_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else {
+ data = QLCNIC_PORT_MODE_AUTO_NEG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+ }
+ QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+
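+ /* interrupt mode preference: MSI-X, then MSI, then legacy INTx */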
+ qlcnic_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+
+ qlcnic_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ qlcnic_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw.pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+ void __iomem *mem_ptr0 = NULL;
+ resource_size_t mem_base;
+ unsigned long mem_len, pci_len0 = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_len0 = pci_len0;
+
+ adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ return 0;
+}
+
+static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
+ qlcnic_boards[i].sub_device == pdev->subsystem_device) {
+ strcpy(name, qlcnic_boards[i].short_name);
+ found = 1;
+ break;
+ }
+
+ }
+
+ if (!found)
+ strcpy(name, "Unknown");
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char serial_num[32];
+ int i, offset, val;
+ int *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (int *)&serial_num;
+ offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ i = QLCRD32(adapter, QLCNIC_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+ fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ adapter->flags &= ~QLCNIC_LRO_ENABLED;
+
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+}
+
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int val, err, first_boot;
+
+ err = qlcnic_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ if (!qlcnic_can_start_firmware(adapter))
+ goto wait_init;
+
+ first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
+ if (first_boot == 0x55555555)
+ /* This is the first boot after power up */
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+
+ qlcnic_request_firmware(adapter);
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto wait_init;
+
+ if (first_boot != 0x55555555) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ qlcnic_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ qlcnic_set_port_mode(adapter);
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+
+ val = (_QLCNIC_LINUX_MAJOR << 16)
+ | ((_QLCNIC_LINUX_MINOR << 8))
+ | (_QLCNIC_LINUX_SUBVERSION);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, val);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = qlcnic_phantom_init(adapter);
+ if (err)
+ goto err_out;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+
+ qlcnic_update_dma_mask(adapter);
+
+ qlcnic_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ handler = qlcnic_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = qlcnic_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
+{
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Used during resume and firmware recovery. */
+
+static int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Used during suspend and firmware recovery. */
+
+static void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+
+}
+
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_init_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ return err;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_rxbuf;
+ }
+
+ qlcnic_init_coalesce_defaults(adapter);
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ }
+ }
+
+ qlcnic_detach(adapter);
+
+ adapter->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ return;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->diag_test = test;
+
+ ret = qlcnic_attach(adapter);
+ if (ret)
+ return ret;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_enable_int(sds_ring);
+ }
+ }
+
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err)
+ err = __qlcnic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->mc_enabled = 0;
+ adapter->max_mc_count = 38;
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = 2*HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_GRO);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ int err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ mutex_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ err = qlcnic_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+
+ qlcnic_clear_stats(adapter);
+
+ qlcnic_setup_intr(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ qlcnic_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+
+err_out_decr_ref:
+ qlcnic_clr_all_drv_state(adapter);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_diag_entries(adapter);
+
+ qlcnic_cleanup_pci_map(adapter);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(netdev);
+}
+
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ qlcnic_detach(adapter);
+err_out:
+ qlcnic_clr_all_drv_state(adapter);
+ return err;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ qlcnic_detach(adapter);
+ return err;
+}
+
+/*
+ * qlcnic_close - close entry point; disables the network interface
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+ return 0;
+}
+
+static void
+qlcnic_tso_check(struct net_device *netdev,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (vlan_tx_tag_present(skb)) {
+
+ flags = FLAGS_VLAN_OOB;
+ vid = vlan_tx_tag_get(skb);
+ qlcnic_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+ adapter->stats.lso_frames++;
+}
+
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = pci_map_page(pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
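+ /* Words 0 and 2 hold the control fields (opcode/flags, frag count,
+ * MSS and header lengths); only those need zeroing before reuse.
+ */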
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 4 fragments per cmd descriptor */
+ no_of_desc = (frag_count + 3) >> 2;
+
+ if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ netif_stop_queue(netdev);
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc. */
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ dev_info(&netdev->dev, "NIC Link is down\n");
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw.linkup && linkup) {
+ dev_info(&netdev->dev, "NIC Link is up\n");
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void qlcnic_tx_timeout_task(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter =
+ container_of(work, struct qlcnic_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (!qlcnic_reset_context(adapter)) {
+ adapter->netdev->trans_start = jiffies;
+ return;
+ }
+
+ /* context reset failed, fall through for fw reset */
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->diag_cnt++;
+ qlcnic_enable_int(sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ int done;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
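+ /* If another context already owns tx cleanup, report "done" rather
+ * than spinning on the lock.
+ */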
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ __netif_tx_lock(tx_ring->txq, smp_processor_id());
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ adapter->stats.xmit_on++;
+ }
+ __netif_tx_unlock(tx_ring->txq);
+ }
+ }
+ /*
+ * If everything is freed up to the consumer then check if the ring is
+ * full. If the ring is full then check if more needs to be freed and
+ * schedule the callback again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ disable_irq(adapter->irq);
+ qlcnic_intr(adapter->irq, adapter);
+ enable_irq(adapter->irq);
+}
+#endif
+
+static void
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
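+ /* Each function owns a 4-bit field in CRB_DRV_STATE: bit 0 acks a
+ * reset request, bit 1 a quiescent request.
+ */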
+ if (state == QLCNIC_DEV_NEED_RESET)
+ val |= ((u32)0x1 << (adapter->portnum * 4));
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+
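+ /* The low bit of each function's nibble is its reference; once no
+ * function holds one, park the device back in COLD state.
+ */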
+ if (!(val & 0x11111111))
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
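+ /* Return 0 once every active function has flagged itself ready for
+ * the pending reset/quiescent request; non-zero keeps the caller
+ * polling.
+ */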
+ int act, state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ int cnt = 0;
+ int portnum = adapter->portnum;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ if (!(val & ((int)0x1 << (portnum * 4)))) {
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
+ goto start_fw;
+ }
+
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+start_fw:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ qlcnic_api_unlock(adapter);
+ return 0;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ qlcnic_api_unlock(adapter);
+ return -1;
+ }
+
+ qlcnic_api_unlock(adapter);
+ msleep(1000);
+ while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
+ ++cnt < 20)
+ msleep(1000);
+
+ if (cnt >= 20)
+ return -1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ int dev_state;
+
+ if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ goto err_ret;
+
+ if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+
+ if (qlcnic_check_drv_state(adapter)) {
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+
+ goto err_ret;
+ }
+
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
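+ /* fall through - firmware start failed */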
+ case QLCNIC_DEV_FAILED:
+ break;
+
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ rtnl_lock();
+ qlcnic_detach(adapter);
+ rtnl_unlock();
+
+ status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == QLCNIC_TEMP_PANIC)
+ goto err_ret;
+
+ qlcnic_set_drv_state(adapter, adapter->dev_state);
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+
+}
+
+static void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ }
+
+ qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err) {
+ qlcnic_detach(adapter);
+ goto done;
+ }
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ u32 state = 0, heartbit;
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ qlcnic_dev_request_reset(adapter);
+ goto detach;
+ }
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ adapter->need_fw_reset = 1;
+
+ heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ qlcnic_dev_request_reset(adapter);
+
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+
+ return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 4) || (offset & 0x3))
+ return -EINVAL;
+
+ if (offset < QLCNIC_PCI_CRBSPACE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static int
+qlcnic_destip_supported(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (!qlcnic_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+ return;
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ qlcnic_config_indev_addr(dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL || !netif_running(dev))
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter || !qlcnic_destip_supported(adapter))
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown
+};
+
+static int __init qlcnic_init_module(void)
+{
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+ return pci_register_driver(&qlcnic_driver);
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..8b742b639ceb 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
#define DRV_VERSION "v1.00.00.23.00.00-01"
#define PFX "qlge: "
-#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
- do { \
- if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
- ; \
- else \
- dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
- "%s: " fmt, __func__, ##args); \
- } while (0)
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -54,12 +46,8 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
-#define MAX_SPLIT_SIZE 1023
-#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +67,43 @@
#define TX_DESC_PER_OAL 0
#endif
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
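+/* e.g. for a 64-bit address a: MSW(MSD(a)) is bits 63..48, LSW(MSD(a))
+ * bits 47..32, MSW(LSD(a)) bits 31..16 and LSW(LSD(a)) bits 15..0.
+ */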
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
*/
enum {
MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
};
/*
@@ -468,7 +484,7 @@ enum {
MDIO_PORT = 0x00000440,
MDIO_STATUS = 0x00000450,
- /* XGMAC AUX statistics registers */
+ XGMAC_REGISTER_END = 0x00000740,
};
/*
@@ -509,6 +525,7 @@ enum {
enum {
MAC_ADDR_IDX_SHIFT = 4,
MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
MAC_ADDR_TYPE_MASK = 0x000f0000,
MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +543,30 @@ enum {
MAC_ADDR_MR = (1 << 30),
MAC_ADDR_MW = (1 << 31),
MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
};
/*
@@ -596,6 +637,7 @@ enum {
enum {
RT_IDX_IDX_SHIFT = 8,
RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
RT_IDX_TYPE_RT = 0x00000000,
RT_IDX_TYPE_RT_INV = 0x00010000,
RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +706,89 @@ enum {
RT_IDX_UNUSED013 = 13,
RT_IDX_UNUSED014 = 14,
RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_SLOTS = 16,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
};
/*
@@ -737,6 +861,21 @@ enum {
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
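+/* Small rx buffer geometry depends on whether the architecture handles
+ * unaligned access efficiently: if it does, a single 256-byte buffer
+ * with no header split or pad is used; otherwise the 512-byte buffer,
+ * half-size mapping, header split and 32-byte pad are kept.
+ */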
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
/*
* CAM output format.
*/
@@ -1421,7 +1560,7 @@ struct nic_stats {
u64 rx_nic_fifo_drop;
};
-/* Address/Length pairs for the coredump. */
+/* Firmware coredump internal register address/length pairs. */
enum {
MPI_CORE_REGS_ADDR = 0x00030000,
MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1615,7 @@ struct mpi_coredump_segment_header {
u8 description[16];
};
-/* Reg dump segment numbers. */
+/* Firmware coredump header segment numbers. */
enum {
CORE_SEG_NUM = 1,
TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1666,67 @@ enum {
};
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
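+/* 1 + (64 * 2) = 129 words per probe, and 129 * 34 = 4386 words in all. */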
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * That gives 48 entries in total, 4 words each.
+ */
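+/* (2 * 8) + (2 * 16) = 48 entries, 48 * 4 = 192 words. */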
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
+/* There are 10 address blocks in the filter, each with
+ * a different entry count and word count per entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
+
struct ql_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
@@ -1568,6 +1768,199 @@ struct ql_reg_dump {
u32 ets[8+2];
};
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+ /* semaphore registers, one per function (MAX_SEMAPHORE_FUNCTIONS) */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
/*
* intr_context structure is used during initialization
* to hook the interrupts. It is also used in a single
@@ -1603,6 +1996,8 @@ enum {
QL_CAM_RT_SET = 8,
QL_SELFTEST = 9,
QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
};
/* link_status bit definitions */
@@ -1724,6 +2119,8 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
u32 link_config;
u32 led_config;
u32 max_frame_size;
@@ -1736,10 +2133,14 @@ struct ql_adapter {
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
struct completion ide_completion;
struct nic_operations *nic_ops;
u16 device_id;
+ struct timer_list timer;
atomic_t lb_count;
+ /* Keep local copy of current mac address. */
+ char current_mac_addr[6];
};
/*
@@ -1807,6 +2208,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2219,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_sys_err(struct ql_adapter *qdev);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2244,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c4710761..362664628937 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,407 @@
+#include <linux/slab.h>
+
#include "qlge.h"
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register of the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_write;
+ int status = 0;
+
+ register_to_write = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_write, reg_val);
+
+ return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+ /* The XAUI needs to be read out per port */
+ if (qdev->func & 1) {
+ /* We are NIC 2 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ } else {
+ /* We are NIC 1 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ }
+
+ /*
+ * The XFI status register is shared, so it only needs to be read
+ * from one function; its bits then cover both.
+ */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ }
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_train;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_train;
+ }
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ }
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_tx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ }
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_rx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ }
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ }
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
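Each serdes block above is captured twice: once through the local function's register window (the "direct" copy) and once through the peer function's window (the "indirect" copy), with the xaui/xfi validity flags deciding which side is real. ql_get_both_serdes() itself is outside this hunk; the stand-alone sketch below only illustrates that dual-capture pattern and assumes, as the flags suggest, that an unusable side is dead-filled with 0xdeadbeef.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical register read; the driver goes through an XFI/XAUI
 * indirect-address window instead. */
static int read_serdes_reg(bool other_func, uint32_t addr, uint32_t *val)
{
	*val = (other_func ? 0x20000000u : 0x10000000u) | addr;	/* stub */
	return 0;
}

/* Capture one serdes address into both the direct (local function) and
 * indirect (peer function) slots, dead-filling whichever side is not
 * usable -- the role ql_get_both_serdes() is assumed to play above. */
static void get_both_serdes(uint32_t addr, uint32_t *direct, uint32_t *indirect,
			    bool direct_valid, bool indirect_valid)
{
	if (!direct_valid || read_serdes_reg(false, addr, direct))
		*direct = 0xdeadbeef;
	if (!indirect_valid || read_serdes_reg(true, addr, indirect))
		*indirect = 0xdeadbeef;
}

int main(void)
{
	uint32_t d, ind;

	get_both_serdes(0x1000, &d, &ind, true, false);
	printf("direct %#x indirect %#x\n", (unsigned)d, (unsigned)ind);
	return 0;
}
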
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
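ql_read_other_func_xgmac_reg() is the usual address/data window idiom: wait for the window to go ready, post the register address with a read strobe, wait again, then latch the data register. A minimal stand-alone sketch of that idiom follows; the accessors, bit names and retry budget are stand-ins, not the driver's API.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define ADDR_RDY  (1u << 31)	/* hypothetical "ready" bit   */
#define ADDR_READ (1u << 30)	/* hypothetical "read" strobe */

/* Stubbed accessors standing in for the MMIO / proxy register helpers. */
static uint32_t fake_regs[2];
static uint32_t reg_read(int idx)              { return fake_regs[idx] | ADDR_RDY; }
static void     reg_write(int idx, uint32_t v) { fake_regs[idx] = v; }

static int wait_ready(int idx, unsigned int tries)
{
	while (tries--)
		if (reg_read(idx) & ADDR_RDY)
			return 0;
	return -ETIMEDOUT;
}

/* Indirect read: post the address plus read strobe into the address
 * register (index 0), wait for ready, then latch the data register
 * (index 1). */
static int indirect_read(uint32_t reg, uint32_t *data)
{
	int status = wait_ready(0, 100);

	if (status)
		return status;
	reg_write(0, reg | ADDR_READ);
	status = wait_ready(0, 100);
	if (status)
		return status;
	*data = reg_read(1);
	return 0;
}

int main(void)
{
	uint32_t val;

	if (!indirect_read(0x114, &val))
		printf("reg 0x114 = %#x\n", (unsigned)val);
	return 0;
}
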
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+ /* We're reading 400 xgmac registers, but we filter out
+		 * several locations that are non-responsive to reads.
+ */
+ if ((i == 0x00000114) ||
+ (i == 0x00000118) ||
+ (i == 0x0000013c) ||
+ (i == 0x00000140) ||
+ (i > 0x00000150 && i < 0x000001fc) ||
+ (i > 0x00000278 && i < 0x000002a0) ||
+ (i > 0x000002c0 && i < 0x000002cf) ||
+ (i > 0x000002dc && i < 0x000002f0) ||
+ (i > 0x000003c8 && i < 0x00000400) ||
+ (i > 0x00000400 && i < 0x00000410) ||
+ (i > 0x00000410 && i < 0x00000420) ||
+ (i > 0x00000420 && i < 0x00000430) ||
+ (i > 0x00000430 && i < 0x00000440) ||
+ (i > 0x00000440 && i < 0x00000450) ||
+ (i > 0x00000450 && i < 0x00000500) ||
+ (i > 0x0000054c && i < 0x00000568) ||
+ (i > 0x000005c8 && i < 0x00000600)) {
+ if (other_function)
+ status =
+ ql_read_other_func_xgmac_reg(qdev, i, buf);
+ else
+ status = ql_read_xgmac_reg(qdev, i, buf);
+
+ if (status)
+ *buf = 0xdeadbeef;
+ break;
+ }
+ }
+ return status;
+}
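Per the comment above, the intent is to walk the whole XGMAC window while skipping the sub-ranges that do not respond to reads and dead-filling any read that fails. The sketch below is a stand-alone version of that skip-list walk; the range table and register read are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

struct dead_range { uint32_t lo, hi; };	/* skip offsets with lo < off < hi */

/* Illustrative subset of the unresponsive windows listed above. */
static const struct dead_range dead[] = {
	{ 0x150, 0x1fc }, { 0x278, 0x2a0 }, { 0x5c8, 0x600 },
};

static bool is_dead(uint32_t off)
{
	for (size_t i = 0; i < sizeof(dead) / sizeof(dead[0]); i++)
		if (off > dead[i].lo && off < dead[i].hi)
			return true;
	return false;
}

static int read_reg(uint32_t off, uint32_t *val)	/* stub read */
{
	*val = off;
	return 0;
}

/* Walk [start, end) in 4-byte steps, skipping the dead windows and
 * dead-filling any register whose read fails. */
static void dump_window(uint32_t start, uint32_t end, uint32_t *buf)
{
	for (uint32_t off = start; off < end; off += 4, buf++) {
		if (is_dead(off)) {
			*buf = 0;	/* placeholder for a skipped slot */
			continue;
		}
		if (read_reg(off, buf))
			*buf = 0xdeadbeef;
	}
}

int main(void)
{
	uint32_t buf[0x40];

	dump_window(0x100, 0x200, buf);
	printf("first word %#x\n", (unsigned)buf[0]);
	return 0;
}
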
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
{
@@ -43,8 +445,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower MAC address */
@@ -55,8 +457,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower Mcast address */
@@ -79,8 +481,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
for (i = 0; i < 16; i++) {
status = ql_get_routing_reg(qdev, i, &value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of routing index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register.\n");
goto err;
} else {
*buf++ = value;
@@ -91,6 +493,226 @@ err:
return status;
}
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf++;
+ }
+ probe |= PRB_MX_ADDR_UP;
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = ql_read32(qdev, PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+
+}
+
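For every module whose bit is set in the validity mask, ql_get_probe() emits one probe-select word (written only at mux 0) followed by a low/high data pair for each mux setting, so each clock domain contributes mods * (1 + 2 * mux_count) words to probe_dump[]. A small sketch of that sizing arithmetic; the module and mux counts below are assumptions, not the PRB_MX_ADDR_MAX_* values.

#include <stdio.h>
#include <stdint.h>

#define MAX_MODS 32	/* assumed stand-in for PRB_MX_ADDR_MAX_MODS */
#define MAX_MUX  64	/* assumed stand-in for PRB_MX_ADDR_MAX_MUX  */

/* Words produced for one clock domain: for each module flagged in the
 * validity mask, 1 probe-select word (written at mux 0) plus a low/high
 * pair for every mux value. */
static unsigned int probe_words(uint32_t valid_mask)
{
	unsigned int mods = 0;

	for (unsigned int m = 0; m < MAX_MODS; m++)
		if ((valid_mask >> m) & 1)
			mods++;
	return mods * (1 + 2 * MAX_MUX);
}

int main(void)
{
	printf("%u words for a 3-module clock domain\n", probe_words(0x7));
	return 0;
}
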
+/* Read out the routing index registers */
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+ *buf = type;
+ buf++;
+ *buf = index;
+ buf++;
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
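Unlike ql_wait_reg_rdy() used elsewhere in the driver, the loop above spins on RT_IDX_MR with no retry budget, so a register that never raises the bit would stall the dump. A bounded variant of the poll is sketched below; the accessor, bit value and limits are illustrative stand-ins.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>

#define MR_BIT (1u << 30)	/* stand-in for the RT_IDX_MR "request done" bit */

static uint32_t read_rt_idx(void) { return MR_BIT; }	/* stub accessor */

/* Poll for MR_BIT with an upper bound instead of spinning forever. */
static int wait_rt_idx_ready(uint32_t *result, unsigned int tries)
{
	while (tries--) {
		*result = read_rt_idx();
		if (*result & MR_BIT)
			return 0;
		usleep(10);	/* analogous to udelay(UDELAY_DELAY) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	uint32_t result = 0;
	int status = wait_rt_idx_ready(&result, 100);

	printf("status %d, result %#x\n", status, (unsigned)result);
	return 0;
}
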
+/* Read out the MAC protocol registers */
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+			printk(KERN_ERR "Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
+
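Each type in the switch above contributes max_index * max_offset (index, data) pairs, two words apiece, to mac_prot_regs[]. The sketch below shows how that table translates into a buffer size; the per-type counts here are placeholders, the real ones being the MAC_ADDR_MAX_* constants.

#include <stdio.h>

struct prot_type { unsigned int max_index, max_offset; };

/* Placeholder table mirroring the switch above; the real counts are the
 * MAC_ADDR_MAX_* values (CAM entries, multicast, filter masks, ...). */
static const struct prot_type types[] = {
	{ 512, 3 },	/* CAM, example values only   */
	{ 32, 2 },	/* multicast, example values  */
	{ 128, 1 },	/* filter mask, example       */
};

int main(void)
{
	unsigned long words = 0;

	for (unsigned int t = 0; t < sizeof(types) / sizeof(types[0]); t++)
		/* two words stored per (index, offset) pair: the readback
		 * of MAC_ADDR_IDX and the MAC_ADDR_DATA value */
		words += 2UL * types[t].max_index * types[t].max_offset;
	printf("mac protocol dump: %lu words\n", words);
	return 0;
}
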
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+ /* if the read failed then dead fill the element. */
+		if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
/* Create a coredump segment header */
static void ql_build_coredump_seg_header(
struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +725,526 @@ static void ql_build_coredump_seg_header(
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
+ return -ENOMEM;
+ }
+
+	/* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+ /* Prevent the mpi restarting while we dump the memory.*/
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+			  "Force Coredump can only be done from an interface that is up.\n");
+ return;
+ }
+
+ if (ql_mb_sys_err(qdev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Fail force coredump with ql_mb_sys_err().\n");
+ return;
+ }
+}
+
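The dump image built by ql_core_dump() is a global header followed by a sequence of segments, each a segment header whose length field covers the header plus its payload, with a short truncated description string. The sketch below uses hypothetical header layouts purely to illustrate how those lengths are assembled; the real structures are the mpi_coredump_*_header definitions in qlge.h.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical shapes for illustration only; the real definitions are
 * struct mpi_coredump_global_header / _segment_header in qlge.h. */
struct seg_header {
	uint32_t cookie;
	uint32_t seg_num;
	uint32_t seg_size;	/* header plus payload, as built above */
	char description[16];
};

struct demo_segment {
	struct seg_header hdr;
	uint32_t regs[8];	/* payload */
};

static void build_seg_header(struct seg_header *h, uint32_t seg_num,
			     uint32_t seg_size, const char *desc)
{
	memset(h, 0, sizeof(*h));
	h->cookie = 0x20100120;	/* placeholder, not MPI_COREDUMP_COOKIE */
	h->seg_num = seg_num;
	h->seg_size = seg_size;
	/* copy at most sizeof(description) - 1 bytes so the (zeroed)
	 * field stays NUL terminated */
	strncpy(h->description, desc, sizeof(h->description) - 1);
}

int main(void)
{
	struct demo_segment seg;

	build_seg_header(&seg.hdr, 1,
			 sizeof(struct seg_header) + sizeof(seg.regs),
			 "Demo Registers");
	printf("segment %u: %u bytes\n",
	       (unsigned)seg.hdr.seg_num, (unsigned)seg.hdr.seg_size);
	return 0;
}
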
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump)
{
@@ -178,6 +1320,37 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+
+ if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ ql_get_core_dump(qdev);
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
+
+ for (i = 0; i < count; i += 8) {
+ printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x \n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
}
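ql_mpi_core_to_log() streams the coredump into the message log eight 32-bit words per line, prefixed with the word index, sleeping between lines so the log can drain; the kernel's print_hex_dump() could produce a similar listing. An equivalent stand-alone formatting loop (this one also clamps the final partial line):

#include <stdio.h>
#include <stdint.h>

static void dump_words(const uint32_t *buf, unsigned int count)
{
	for (unsigned int i = 0; i < count; i += 8) {
		printf("%.08x:", i);
		for (unsigned int j = 0; j < 8 && i + j < count; j++)
			printf(" %.08x", (unsigned)buf[i + j]);
		printf("\n");
	}
}

int main(void)
{
	uint32_t demo[20];

	for (unsigned int i = 0; i < 20; i++)
		demo[i] = 0xdead0000u + i;
	dump_words(demo, 20);
	return 0;
}
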
#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6f..7e09ff4a5755 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -7,7 +7,6 @@
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
@@ -67,8 +66,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -89,8 +88,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -107,8 +106,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- QPRINTK(qdev, DRV, ERR,
- "Couldn't get xgmac sem.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
goto quit;
}
/*
@@ -116,8 +115,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -129,8 +129,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -142,8 +143,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x500; i < 0x540; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -155,8 +157,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x568; i < 0x5a8; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -167,8 +170,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX NIC FIFO DROP statistics.
*/
if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
@@ -396,14 +399,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
qdev->wol = wol->wolopts;
- QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
- qdev->wol, ndev->name);
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
if (!qdev->wol) {
u32 wol = 0;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "cleared sucessfully" : "clear failed",
- wol, qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+ status == 0 ? "cleared successfully" : "clear failed",
+ wol);
}
return 0;
@@ -500,7 +502,8 @@ static int ql_run_loopback_test(struct ql_adapter *qdev)
return -EPIPE;
atomic_inc(&qdev->lb_count);
}
-
+ /* Give queue time to settle before testing results. */
+ msleep(2);
ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
@@ -533,9 +536,13 @@ static void ql_self_test(struct net_device *ndev,
data[0] = 0;
}
clear_bit(QL_SELFTEST, &qdev->flags);
+ /* Give link time to come up after
+ * port configuration changes.
+ */
+ msleep_interruptible(4 * 1000);
} else {
- QPRINTK(qdev, DRV, ERR,
- "%s: is down, Loopback test will fail.\n", ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "is down, Loopback test will fail.\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..fd34f266c0a8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+		 "Default is OFF - Do not allocate memory.");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
@@ -116,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
break;
default:
- QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
+ netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
return -EINVAL;
}
@@ -156,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
/* check for errors */
if (temp & err_bit) {
- QPRINTK(qdev, PROBE, ALERT,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
+ netif_alert(qdev, probe, qdev->ndev,
+ "register 0x%.08x access error, value = 0x%.08x!.\n",
+ reg, temp);
return -EIO;
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
count--;
}
- QPRINTK(qdev, PROBE, ALERT,
- "Timed out waiting for reg %x to come ready.\n", reg);
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
return -ETIMEDOUT;
}
@@ -209,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
map = pci_map_single(qdev->pdev, ptr, size, direction);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
@@ -219,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for CFG to come ready.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
goto exit;
}
@@ -301,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -359,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- QPRINTK(qdev, IFUP, DEBUG,
- "Adding %s address %pM"
- " at index %d in the CAM.\n",
- ((type ==
- MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
- "UNICAST"), addr, index);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Adding %s address %pM at index %d in the CAM.\n",
+ type == MAC_ADDR_TYPE_MULTI_MAC ?
+ "MULTICAST" : "UNICAST",
+ addr, index);
status =
ql_wait_reg_rdy(qdev,
@@ -414,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
- (enable_bit ? "Adding" : "Removing"),
- index, (enable_bit ? "to" : "from"));
+ netif_info(qdev, ifup, qdev->ndev,
+ "%s VLAN ID %d %s the CAM.\n",
+ enable_bit ? "Adding" : "Removing",
+ index,
+ enable_bit ? "to" : "from");
status =
ql_wait_reg_rdy(qdev,
@@ -431,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -450,17 +463,14 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
char *addr;
if (set) {
- addr = &qdev->ndev->dev_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5]);
+ addr = &qdev->current_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Clearing MAC address on %s\n",
- qdev->ndev->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
}
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -469,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
- "address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
void ql_link_off(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
@@ -522,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- QPRINTK(qdev, IFUP, DEBUG,
- "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
- (enable ? "Adding" : "Removing"),
- ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
- ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
- ((index ==
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
- ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
- ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
- ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
- ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
- ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
- ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
- ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
- ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
- ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
- ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
- ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
- ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
- ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
- (enable ? "to" : "from"));
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s %s mask %s the routing reg.\n",
+ enable ? "Adding" : "Removing",
+ index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+ index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+ index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+ index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+ index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+ index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+ index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+ index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+ index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+ index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+ index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+ index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+ index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+ index == RT_IDX_UNUSED013 ? "UNUSED13" :
+ index == RT_IDX_UNUSED014 ? "UNUSED14" :
+ index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+ "(Bad index != RT_IDX)",
+ enable ? "to" : "from");
switch (mask) {
case RT_IDX_CAM_HIT:
@@ -602,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
break;
}
default:
- QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
- mask);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
@@ -709,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
@@ -717,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
csum += le16_to_cpu(*flash++);
if (csum)
- QPRINTK(qdev, IFUP, ERR,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
@@ -770,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
}
@@ -779,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -797,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
- QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
@@ -831,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
@@ -841,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -959,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
- QPRINTK(qdev, LINK, INFO,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
- QPRINTK(qdev, LINK, CRIT,
- "Port initialize timed out.\n");
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
}
return status;
}
- QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
+ netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
@@ -1099,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
- QPRINTK(qdev, DRV, ERR,
- "page allocation failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
@@ -1110,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
- QPRINTK(qdev, DRV, ERR,
- "PCI mapping failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
@@ -1148,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->lbq_free_cnt > 32) {
for (i = 0; i < 16; i++) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- QPRINTK(qdev, IFUP, ERR,
- "Could not get a page chunk.\n");
- return;
- }
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk.\n");
+ return;
+ }
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
@@ -1181,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
@@ -1201,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->sbq_free_cnt > 16) {
for (i = 0; i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, PROBE, ERR,
- "Couldn't get an skb.\n");
+ netif_err(qdev, probe, qdev->ndev,
+ "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1223,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -1247,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
@@ -1281,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
* then its an OAL.
*/
if (i == 7) {
- QPRINTK(qdev, TX_DONE, DEBUG,
- "unmapping OAL area.\n");
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1291,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
maplen),
PCI_DMA_TODEVICE);
} else {
- QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
- i);
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
@@ -1317,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
int frag_cnt = skb_shinfo(skb)->nr_frags;
if (frag_cnt) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
}
/*
* Map the skb buffer first.
@@ -1326,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping failed with error: %d\n", err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
return NETDEV_TX_BUSY;
}
@@ -1373,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping outbound address list with error: %d\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
goto map_error;
}
@@ -1403,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping frags failed with error: %d.\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
goto map_error;
}
@@ -1433,6 +1447,260 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ rx_frag += nr_frags;
+ rx_frag->page = lbq_desc->p.pg_chunk.page;
+ rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+ rx_frag->size = length;
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
+ else
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, need to unwind!.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Segment too small, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+ length-ETH_HLEN);
+ skb->len += length-ETH_HLEN;
+ skb->data_len += length-ETH_HLEN;
+ skb->truesize += length-ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+ else
+ napi_gro_receive(napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ netif_err(qdev, probe, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
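The rx-checksum handling repeated in the receive paths above boils down to one predicate: report CHECKSUM_UNNECESSARY only when hardware checksumming is enabled, no checksum or frame error is flagged, and the frame is TCP or an unfragmented IPv4 UDP datagram. A stand-alone sketch of that predicate, with stand-in flag bits rather than the IB_MAC_IOCB_RSP_* values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Stand-in flag bits; the driver tests IB_MAC_IOCB_RSP_T/_U/_V4 and the
 * checksum error mask instead. */
#define RSP_TCP        (1u << 0)
#define RSP_UDP        (1u << 1)
#define RSP_IPV4       (1u << 2)
#define RSP_CSUM_ERR   (1u << 3)
#define IP_FRAGMENTED  (1u << 4)

static bool rx_csum_unnecessary(bool rx_csum_enabled, uint32_t flags)
{
	if (!rx_csum_enabled || (flags & RSP_CSUM_ERR))
		return false;
	if (flags & RSP_TCP)
		return true;
	/* UDP is only trusted when the IPv4 datagram is not fragmented,
	 * matching the iph->frag_off check in the functions above */
	return (flags & RSP_UDP) && (flags & RSP_IPV4) &&
	       !(flags & IP_FRAGMENTED);
}

int main(void)
{
	printf("tcp: %d, fragmented udp: %d\n",
	       rx_csum_unnecessary(true, RSP_TCP),
	       rx_csum_unnecessary(true, RSP_UDP | RSP_IPV4 | IP_FRAGMENTED));
	return 0;
}
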
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1467,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
/*
* Headers fit nicely into a small buffer.
*/
@@ -1486,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Handle the data buffer(s).
*/
if (unlikely(!length)) { /* Is there data too? */
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No Data buffer in this packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
return skb;
}
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Headers in small, data of %d bytes in small, combine them.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
/*
* Data is less than small buffer size so it's
* stuffed in a small buffer.
@@ -1520,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
maplen),
PCI_DMA_FROMDEVICE);
} else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes in a single small buffer.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
@@ -1536,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Header in small, %d bytes in large. Chain large to small!\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
/*
* The data is in a single large buffer. We
* chain it to the header buffer's skb and let
* it rip.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Chaining page at offset = %d,"
- "for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
@@ -1563,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
- QPRINTK(qdev, PROBE, DEBUG,
- "No skb available, drop the packet.\n");
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
return NULL;
}
pci_unmap_page(qdev->pdev,
@@ -1573,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
pci_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
skb_fill_page_desc(skb, 0,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1615,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* a local buffer and use it to find the
* pages to chain.
*/
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers & data in chain of large.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
skb = sbq_desc->p.skb;
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
@@ -1626,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
skb_fill_page_desc(skb, i,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1646,29 +1919,28 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No skb available, drop packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
rx_ring->rx_dropped++;
return;
}
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
dev_kfree_skb_any(skb);
rx_ring->rx_errors++;
return;
@@ -1693,17 +1965,18 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
rx_ring->rx_multicast++;
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
}
skb->protocol = eth_type_trans(skb, ndev);
@@ -1716,8 +1989,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1726,8 +1999,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -1753,6 +2026,56 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ /* Non-TCP/UDP large frames that span multiple buffers
+ * can be processed correctly by the split frame logic.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ }
+
+ return (unsigned long)length;
+}
+
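
Not part of the patch above -- the bulk of this change swaps the driver-private QPRINTK() macro for the generic netif_err()/netif_warn()/netif_printk() helpers from <linux/netdevice.h>, which gate output on a per-driver msg_enable bitmap. The sketch below shows the usual plumbing those macros assume; foo_adapter and the function names are hypothetical, the helpers and NETIF_MSG_* bits are the standard kernel ones.

/* Illustrative sketch only, not driver code. */
#include <linux/netdevice.h>
#include <linux/module.h>

struct foo_adapter {
	struct net_device *ndev;
	u32 msg_enable;			/* bitmap tested by the netif_*() macros */
};

static int debug = -1;			/* -1 means "use the default bits" */
module_param(debug, int, 0);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP;

static void foo_probe_init(struct foo_adapter *priv)
{
	/* Typically done once at probe time. */
	priv->msg_enable = netif_msg_init(debug, default_msg);
}

static void foo_example(struct foo_adapter *priv)
{
	/* Prints only when the matching NETIF_MSG_* bit is enabled. */
	netif_err(priv, ifup, priv->ndev, "example error, code %d\n", -EIO);
	netif_printk(priv, rx_status, KERN_DEBUG, priv->ndev,
		     "example debug message\n");
}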
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
@@ -1774,20 +2097,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
OB_MAC_IOCB_RSP_L |
OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Total descriptor length did not match transfer length.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too short to be legal, not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too long, but sent anyway.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "PCI backplane error. Frame not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
}
}
atomic_inc(&tx_ring->tx_count);
@@ -1817,33 +2140,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR,
- "Management Processor Fatal Error.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
ql_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
- QPRINTK(qdev, LINK, ERR,
- "Multiple CAM hits lookup occurred.\n");
- QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+ netif_err(qdev, link, qdev->ndev,
+ "Multiple CAM hits lookup occurred.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- QPRINTK(qdev, RX_ERR, ERR,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
default:
- QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
- ib_ae_rsp->event);
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
ql_queue_asic_error(qdev);
break;
}
@@ -1860,9 +2185,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
rmb();
@@ -1873,9 +2198,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_process_mac_tx_intr(qdev, net_rsp);
break;
default:
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
}
count++;
ql_update_cq(rx_ring);
@@ -1907,9 +2232,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
rmb();
@@ -1925,11 +2250,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
net_rsp);
break;
default:
- {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
}
count++;
ql_update_cq(rx_ring);
@@ -1950,8 +2274,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
- QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
- rx_ring->cq_id);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
* right after the RSS rings. */
@@ -1963,9 +2287,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
ql_clean_outbound_rx_ring(trx_ring);
}
}
@@ -1975,9 +2299,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
*/
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
}
@@ -1994,12 +2318,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr
qdev->vlgrp = grp;
if (grp) {
- QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Turning off VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -2015,7 +2340,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return;
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -2032,7 +2358,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -2061,7 +2388,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
spin_lock(&qdev->hw_lock);
if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
spin_unlock(&qdev->hw_lock);
return IRQ_NONE;
}
@@ -2074,10 +2402,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- QPRINTK(qdev, INTR, ERR,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2090,7 +2419,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
- QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
@@ -2105,8 +2435,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
var = ql_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
- QPRINTK(qdev, INTR, INFO,
- "Waking handler for rx_ring[0].\n");
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
@@ -2203,9 +2533,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- QPRINTK(qdev, TX_QUEUED, INFO,
- "%s: shutting down tx queue %d du to lack of resources.\n",
- __func__, tx_ring_idx);
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: shutting down tx queue %d du to lack of resources.\n",
+ __func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
@@ -2226,8 +2556,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
- vlan_tx_tag_get(skb));
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
@@ -2241,8 +2571,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "Could not map the segments.\n");
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2253,8 +2583,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
return NETDEV_TX_OK;
@@ -2285,8 +2616,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev,
PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of RX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2294,8 +2625,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of TX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2349,7 +2680,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
if ((tx_ring->wq_base == NULL) ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
tx_ring->q =
@@ -2400,7 +2731,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
if (sbq_desc == NULL) {
- QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
@@ -2527,7 +2859,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->cq_base_dma);
if (rx_ring->cq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2540,8 +2872,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->sbq_base_dma);
if (rx_ring->sbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
goto err_mem;
}
@@ -2552,8 +2884,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->sbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2569,8 +2901,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->lbq_base_dma);
if (rx_ring->lbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
goto err_mem;
}
/*
@@ -2580,8 +2912,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->lbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2610,10 +2942,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc = &tx_ring->q[i];
if (tx_ring_desc && tx_ring_desc->skb) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
ql_unmap_send(qdev, tx_ring_desc,
tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
@@ -2644,16 +2976,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "RX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "TX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
goto err_mem;
}
}
@@ -2788,14 +3120,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
- QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
- rx_ring->type);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
}
- QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
@@ -2841,10 +3174,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded WQICB.\n");
return err;
}
@@ -2898,15 +3232,15 @@ static void ql_enable_msix(struct ql_adapter *qdev)
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
- QPRINTK(qdev, IFUP, WARNING,
- "MSI-X Enable failed, trying MSI.\n");
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
} else if (err == 0) {
set_bit(QL_MSIX_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
return;
}
}
@@ -2915,13 +3249,14 @@ msi:
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "Running with MSI interrupts.\n");
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
return;
}
}
qlge_irq_type = LEG_IRQ;
- QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
@@ -3093,12 +3428,12 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msix interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msi interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msi interrupt %d.\n", i);
}
}
}
@@ -3123,32 +3458,33 @@ static int ql_request_irq(struct ql_adapter *qdev)
intr_context->name,
&qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed request for MSIX interrupt %d.\n",
- i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
goto err_irq;
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[i].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[i].type ==
- TX_Q ? "TX_Q" : "",
- qdev->rx_ring[i].type ==
- RX_Q ? "RX_Q" : "", intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[i].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[i].type == TX_Q ?
+ "TX_Q" :
+ qdev->rx_ring[i].type == RX_Q ?
+ "RX_Q" : "",
+ intr_context->name);
}
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "trying msi or legacy interrupts.\n");
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: irq = %d.\n", __func__, pdev->irq);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
test_bit(QL_MSI_ENABLED,
@@ -3158,20 +3494,20 @@ static int ql_request_irq(struct ql_adapter *qdev)
if (status)
goto err_irq;
- QPRINTK(qdev, IFUP, ERR,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[0].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
}
intr_context->hooked = 1;
}
return status;
err_irq:
- QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
ql_free_irq(qdev);
return status;
}
@@ -3205,14 +3541,15 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded RICB.\n");
return status;
}
@@ -3227,9 +3564,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev)
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM "
- "packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
break;
}
}
@@ -3253,14 +3589,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for error packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for broadcast packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
@@ -3270,8 +3606,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for MATCH RSS packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
@@ -3279,8 +3615,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
@@ -3298,13 +3634,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
set &= qdev->port_link_up;
status = ql_set_mac_addr(qdev, set);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
status = ql_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
@@ -3332,15 +3668,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
@@ -3369,8 +3705,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start rx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
return status;
}
}
@@ -3381,7 +3717,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
if (qdev->rss_ring_count > 1) {
status = ql_start_rss(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
}
}
@@ -3390,8 +3726,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start tx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
return status;
}
}
@@ -3399,20 +3735,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = 0; i < qdev->rss_ring_count; i++) {
- QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
- i);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Enabling NAPI for rx_ring[%d].\n", i);
napi_enable(&qdev->rx_ring[i].napi);
}
@@ -3429,7 +3765,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
}
@@ -3452,8 +3788,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
} while (time_before(jiffies, end_jiffies));
if (value & RST_FO_FR) {
- QPRINTK(qdev, IFDOWN, ERR,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
@@ -3466,16 +3802,17 @@ static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- QPRINTK(qdev, PROBE, INFO,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
@@ -3492,23 +3829,23 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST)) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
- qdev->wol);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
return -EINVAL;
}
if (qdev->wol & WAKE_MAGIC) {
status = ql_mb_wol_set_magic(qdev, 1);
if (status) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
return status;
} else
- QPRINTK(qdev, DRV, INFO,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
@@ -3516,9 +3853,10 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Sucessfully set" : "Failed", wol,
- qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Successfully set" : "Failed",
+ wol, qdev->ndev->name);
}
return status;
@@ -3538,6 +3876,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -3558,8 +3897,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
status = ql_adapter_reset(qdev);
if (status)
- QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
- qdev->func);
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
return status;
}
@@ -3569,7 +3908,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
err = ql_adapter_initialize(qdev);
if (err) {
- QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3601,7 +3940,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
int status = 0;
if (ql_alloc_mem_resources(qdev)) {
- QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
status = ql_request_irq(qdev);
@@ -3612,6 +3951,16 @@ static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ /* If we hit the pci_channel_io_perm_failure
+ * condition, then we already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
/*
* Wait for device to recover from a reset.
* (Rarely happens, but possible.)
@@ -3681,9 +4030,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- QPRINTK(qdev, IFUP, DEBUG,
- "lbq_buf_size %d, order = %d\n",
- rx_ring->lbq_buf_size, qdev->lbq_buf_order);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size,
+ qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
@@ -3747,14 +4097,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
int i = 3;
while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- QPRINTK(qdev, IFUP, ERR,
- "Waiting for adapter UP...\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
ssleep(1);
}
if (!i) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for adapter UP\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
return -ETIMEDOUT;
}
}
@@ -3780,8 +4130,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
return status;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device.\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
return status;
@@ -3793,28 +4143,25 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
- QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
- QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
- } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
- (ndev->mtu == 9000 && new_mtu == 9000)) {
- return 0;
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
} else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 3*HZ);
+ ndev->mtu = new_mtu;
+
if (!netif_running(qdev->ndev)) {
- ndev->mtu = new_mtu;
return 0;
}
- ndev->mtu = new_mtu;
status = ql_change_rx_buffers(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Changing MTU failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
}
return status;
@@ -3874,8 +4221,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3884,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3897,12 +4244,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3911,32 +4258,34 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
- if (ndev->mc_count) {
+ if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
- for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
- i++, mc_ptr = mc_ptr->next)
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, ndev) {
if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to loadmulticast address.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to loadmulticast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
+ i++;
+ }
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set multicast match mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3954,6 +4303,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ /* Update local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -3961,7 +4312,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
if (status)
- QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -3994,8 +4345,8 @@ static void ql_asic_reset_work(struct work_struct *work)
rtnl_unlock();
return;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
@@ -4094,6 +4445,7 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->reg_base);
if (qdev->doorbell_area)
iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -4119,7 +4471,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out;
+ goto err_out1;
}
err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4492,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out;
+ goto err_out2;
}
/* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4504,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,27 +4514,40 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
- goto err_out;
+ goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ dev_err(&pdev->dev, "Coredump alloc failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
/* make sure the EEPROM is good */
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out;
+ goto err_out2;
}
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+ /* Keep local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
/* Set up the default ring sizes. */
qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
@@ -4204,6 +4569,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
if (!cards_found) {
@@ -4212,8 +4578,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
DRV_NAME, DRV_VERSION);
}
return 0;
-err_out:
+err_out2:
ql_release_all(pdev);
+err_out1:
pci_disable_device(pdev);
return err;
}
@@ -4233,6 +4600,21 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+}
+
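
Not part of the patch above -- the EEH watchdog added here uses the re-arming deferrable-timer pattern of this kernel era (pre timer_setup()): init_timer_deferrable(), a data/function/expires setup, add_timer() to start and re-arm, and del_timer_sync() on teardown. The sketch below restates that pattern with hypothetical foo_* names.

/* Illustrative sketch only, not driver code. */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_adapter {
	struct timer_list timer;
	/* ... */
};

static void foo_check(unsigned long data)
{
	struct foo_adapter *priv = (struct foo_adapter *)data;

	/* Do the periodic check here, then re-arm for five seconds later. */
	priv->timer.expires = jiffies + (5 * HZ);
	add_timer(&priv->timer);
}

static void foo_start_timer(struct foo_adapter *priv)
{
	init_timer_deferrable(&priv->timer);	/* may fire late to save power */
	priv->timer.data = (unsigned long)priv;
	priv->timer.function = foo_check;
	priv->timer.expires = jiffies + (5 * HZ);
	add_timer(&priv->timer);
}

static void foo_stop_timer(struct foo_adapter *priv)
{
	del_timer_sync(&priv->timer);		/* waits for a running handler */
}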
static int __devinit qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
@@ -4284,6 +4666,14 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
@@ -4304,6 +4694,8 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
static void __devexit qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4326,6 +4718,7 @@ static void ql_eeh_close(struct net_device *ndev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4345,6 +4738,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
switch (state) {
case pci_channel_io_normal:
@@ -4358,6 +4752,8 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -4380,11 +4776,18 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -4394,19 +4797,19 @@ static void qlge_io_resume(struct pci_dev *pdev)
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
- if (ql_adapter_reset(qdev))
- QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
- QPRINTK(qdev, IFUP, ERR,
- "Device initialization failed after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
return;
}
} else {
- QPRINTK(qdev, IFUP, ERR,
- "Device was not running prior to EEH.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
}
@@ -4423,6 +4826,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
int err;
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev)) {
err = ql_adapter_down(qdev);
@@ -4453,7 +4857,7 @@ static int qlge_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4467,6 +4871,8 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d4..3c00462a5d22 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
#include "qlge.h"
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
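
Not part of the patch above -- the pause/reset helpers just added all follow the same bounded poll-for-status pattern: issue a command write, then poll a status bit with a short delay until it appears or a retry budget runs out. The sketch below shows that pattern generically, with readl()/writel() standing in for the driver's ql_read32()/ql_write32(); the foo_* names and constants are hypothetical.

/* Illustrative sketch only, not driver code. */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define FOO_POLL_TRIES	100
#define FOO_POLL_MS	10

static int foo_wait_for_bit(void __iomem *reg, u32 bit)
{
	int tries = FOO_POLL_TRIES;

	do {
		if (readl(reg) & bit)
			return 0;		/* status bit came up */
		mdelay(FOO_POLL_MS);
	} while (--tries);

	return -ETIMEDOUT;			/* chip never acknowledged */
}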
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
return status;
}
+/* Determine if we are in charge of the firmware. We are if
+ * we are the lower of the 2 NIC pcie functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+
+}
+
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -57,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
@@ -130,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
- QPRINTK(qdev, DRV, ERR, "Enter!\n");
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
/* Get the status data and start up a thread to
* handle the request.
*/
@@ -138,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
@@ -162,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting RISC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
} else
/* Wake up the sleeping mpi_idc_work thread that is
@@ -181,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "%s: Could not get mailbox status.\n", __func__);
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
return;
}
qdev->link_status = mbcp->mbox_out[1];
- QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
/* If we're coming back from an IDC event
* then set up the CAM and frame routing.
@@ -195,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return;
} else
clear_bit(QL_CAM_RT_SET, &qdev->flags);
@@ -207,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* to our liking.
*/
if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
set_bit(QL_PORT_CFG, &qdev->flags);
/* Begin polled mode early so
* we don't get another interrupt
@@ -229,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
ql_link_off(qdev);
}
@@ -242,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
return status;
}
@@ -257,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
return status;
}
@@ -272,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
else {
int i;
- QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
- QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
}
@@ -293,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
- QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
}
}
@@ -320,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
goto end;
}
@@ -410,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
- QPRINTK(qdev, DRV, ERR,
- "Firmware initialization failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
status = -EIO;
ql_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
- QPRINTK(qdev, DRV, ERR,
- "System Error.\n");
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
ql_queue_fw_error(qdev);
status = -EIO;
break;
@@ -431,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Need to support AEN 8110 */
break;
default:
- QPRINTK(qdev, DRV, ERR,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
/* Clear the MPI firmware status. */
}
end:
@@ -505,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
goto done;
} while (time_before(jiffies, count));
- QPRINTK(qdev, DRV, ERR,
- "Timed out waiting for mailbox complete.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
status = -ETIMEDOUT;
goto end;
@@ -529,6 +606,22 @@ end:
return status;
}
+int ql_mb_sys_err(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 0;
+
+ mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ return status;
+}
/* Get MPI firmware version. This will be used for
* driver banner and for ethtool info.
@@ -552,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed about firmware command\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
status = -EIO;
}
@@ -584,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Firmware State.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
status = -EIO;
}
@@ -594,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
* happen.
*/
if (mbcp->mbox_out[1] & 1) {
- QPRINTK(qdev, DRV, ERR,
- "Firmware waiting for initialization.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
status = -EIO;
}
@@ -627,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed IDC ACK send.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
status = -EIO;
}
return status;
@@ -659,16 +751,72 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- QPRINTK(qdev, DRV, ERR,
- "Port Config sent, wait for IDC.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Set Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
status = -EIO;
}
return status;
}
+int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -EIO;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
+
/* Get link settings and maximum frame size settings
* for the current port.
* Most likely will block.
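To make the ql_mb_dump_ram() word layout above concrete, here is a worked example with invented values, assuming the usual qlge helpers where LSW()/MSW() return the low/high 16 bits of a 32-bit value and MSD() returns the high 32 bits of a 64-bit value:

/* Hypothetical values, purely illustrative:
 *   req_dma = 0x0000000123456000  (64-bit bus address of the dump buffer)
 *   addr    = 0x00020000          (RISC RAM address)
 *   size    = 0x00001000          (number of 32-bit words)
 *
 *   mbox_in[1] = LSW(addr)         = 0x0000
 *   mbox_in[8] = MSW(addr)         = 0x0002
 *   mbox_in[2] = MSW(req_dma)      = 0x2345   (bits 31:16 of the low dword)
 *   mbox_in[3] = LSW(req_dma)      = 0x6000   (bits 15:0  of the low dword)
 *   mbox_in[6] = MSW(MSD(req_dma)) = 0x0000   (bits 63:48)
 *   mbox_in[7] = LSW(MSD(req_dma)) = 0x0001   (bits 47:32)
 *   mbox_in[4] = MSW(size)         = 0x0000
 *   mbox_in[5] = LSW(size)         = 0x1000
 */
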
@@ -691,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
status = -EIO;
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "Passed Get Port Configuration.\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
qdev->link_config = mbcp->mbox_out[1];
qdev->max_frame_size = mbcp->mbox_out[2];
}
@@ -723,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -766,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -793,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
wait_for_completion_timeout(&qdev->ide_completion,
wait_time);
if (!wait_time) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Timeout.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
}
/* Now examine the response from the IDC process.
@@ -802,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev)
* more wait time.
*/
if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Time Extension from function.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Success.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
status = 0;
break;
} else {
- QPRINTK(qdev, DRV, ERR,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
status = -EIO;
break;
}
@@ -842,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
status = -EIO;
}
@@ -868,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
status = -EIO;
} else
qdev->led_config = mbcp->mbox_out[1];
@@ -899,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
/* This indicates that the firmware is
* already in the state we are trying to
* change it to.
*/
- QPRINTK(qdev, DRV, ERR,
- "Command parameters make no change.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
}
return status;
}
@@ -938,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
}
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get MPI traffic control.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
status = -EIO;
}
return status;
@@ -999,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
status = ql_mb_get_port_cfg(qdev);
rtnl_unlock();
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to get port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
goto err;
}
@@ -1013,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
status = ql_set_port_cfg(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to set port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
goto err;
}
end:
@@ -1046,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work)
switch (aen) {
default:
- QPRINTK(qdev, DRV, ERR,
- "Bug: Unhandled IDC action.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
@@ -1062,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1095,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1143,5 +1287,19 @@ void ql_mpi_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
ql_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a8..0298d8c1dcb6 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -29,7 +29,6 @@
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -135,7 +134,7 @@
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
-#define MCAST_MAX 4 /* Max number multicast addresses to filter */
+#define MCAST_MAX 3 /* Max number multicast addresses to filter */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
@@ -938,7 +937,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
/* MAC Address */
@@ -958,25 +957,24 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Too many multicast addresses
* accept all traffic */
- else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
+ else if ((netdev_mc_count(dev) > MCAST_MAX) ||
+ (dev->flags & IFF_ALLMULTI))
reg |= 0x0020;
iowrite16(reg, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
/* Build the hash table */
- if (dev->mc_count > MCAST_MAX) {
+ if (netdev_mc_count(dev) > MCAST_MAX) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
@@ -984,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev)
crc >>= 26;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
- /* Write the index of the hash table */
- for (i = 0; i < 4; i++)
- iowrite16(hash_table[i] << 14, ioaddr + MCR1);
/* Fill the MAC hash tables with their values */
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
@@ -994,17 +989,19 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
- for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
- adrp = (u16 *)dmi->dmi_addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8*i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8*i);
- dmi = dmi->next;
- }
- for (i = dev->mc_count; i < MCAST_MAX; i++) {
- iowrite16(0xffff, ioaddr + MID_0L + 8*i);
- iowrite16(0xffff, ioaddr + MID_0M + 8*i);
- iowrite16(0xffff, ioaddr + MID_0H + 8*i);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (i < MCAST_MAX) {
+ adrp = (u16 *) dmi->dmi_addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ } else {
+ iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
+ }
+ i++;
}
}
@@ -1222,7 +1219,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id r6040_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
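The r6040 hunks above are one instance of a conversion repeated through the rest of this diff (r8169, s2io, sb1250-mac, sc92031, sfc): open-coded walks of dev->mc_list and dev->mc_count are replaced by the netdev_mc_count()/netdev_for_each_mc_addr() accessors. A minimal, self-contained sketch of the new idiom; the function name and the bound of 3 are illustrative, not driver code:

#include <linux/netdevice.h>

static void example_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	int i = 0;

	/* Replaces: if (dev->mc_count > MCAST_MAX) ... */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		return;	/* too many entries for perfect filtering */

	/* Replaces: for (dmi = dev->mc_list; dmi; dmi = dmi->next) */
	netdev_for_each_mc_addr(dmi, dev) {
		pr_debug("mc[%d] = %pM\n", i, dmi->dmi_addr);
		i++;
	}
}
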
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a24..4748c21eb72e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);
-static struct pci_device_id rtl8169_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -186,7 +186,12 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int rx_copybreak = 200;
+/*
+ * We set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see the note in
+ * rtl8169_open()).
+ */
+static int rx_copybreak = 16383;
static int use_dac;
static struct {
u32 msg_enable;
@@ -744,12 +749,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
netif_carrier_on(dev);
- if (netif_msg_ifup(tp))
- printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ netif_info(tp, ifup, dev, "link up\n");
} else {
- if (netif_msg_ifdown(tp))
- printk(KERN_INFO PFX "%s: link down\n", dev->name);
netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
}
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -862,11 +865,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
} else if (autoneg == AUTONEG_ENABLE)
RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
else {
- if (netif_msg_link(tp)) {
- printk(KERN_WARNING "%s: "
- "incorrect speed setting refused in TBI mode\n",
- dev->name);
- }
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
ret = -EOPNOTSUPP;
}
@@ -901,9 +901,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
(tp->mac_version != RTL_GIGA_MAC_VER_15) &&
(tp->mac_version != RTL_GIGA_MAC_VER_16)) {
giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else if (netif_msg_link(tp)) {
- printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
- dev->name);
+ } else {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2705,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
if (tp->link_ok(ioaddr))
goto out_unlock;
- if (netif_msg_link(tp))
- printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
tp->phy_reset_enable(ioaddr);
@@ -2760,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
{
iounmap(ioaddr);
pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
@@ -2776,8 +2776,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
return;
msleep(1);
}
- if (netif_msg_link(tp))
- printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ netif_err(tp, link, dev, "PHY reset failed\n");
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2810,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
*/
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -2827,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC0, low);
+
RTL_W32(MAC4, high);
+ RTL_R32(MAC4);
+
+ RTL_W32(MAC0, low);
+ RTL_R32(MAC0);
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -3012,41 +3016,34 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "enable failure\n");
+ netif_err(tp, probe, dev, "enable failure\n");
goto err_out_free_dev_1;
}
- rc = pci_set_mwi(pdev);
- if (rc < 0)
- goto err_out_disable_2;
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- }
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
rc = -ENODEV;
- goto err_out_mwi_3;
+ goto err_out_mwi_2;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "Invalid PCI region size(s), aborting\n");
- }
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
- goto err_out_mwi_3;
+ goto err_out_mwi_2;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "could not request regions.\n");
- goto err_out_mwi_3;
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3058,26 +3055,22 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "DMA configuration failed.\n");
- }
- goto err_out_free_res_4;
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
}
}
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
- goto err_out_free_res_4;
+ goto err_out_free_res_3;
}
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap && netif_msg_probe(tp))
- dev_info(&pdev->dev, "no PCI Express capability\n");
+ if (!tp->pcie_cap)
+ netif_info(tp, probe, dev, "no PCI Express capability\n");
RTL_W16(IntrMask, 0x0000);
@@ -3100,10 +3093,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Use appropriate default if unknown */
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- if (netif_msg_probe(tp)) {
- dev_notice(&pdev->dev,
- "unknown MAC, using family default\n");
- }
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
tp->mac_version = cfg->default_ver;
}
@@ -3116,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (i == ARRAY_SIZE(rtl_chip_info)) {
dev_err(&pdev->dev,
"driver bug, MAC version not found in rtl_chip_info\n");
- goto err_out_msi_5;
+ goto err_out_msi_4;
}
tp->chipset = i;
@@ -3181,23 +3172,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_msi_5;
+ goto err_out_msi_4;
pci_set_drvdata(pdev, dev);
- if (netif_msg_probe(tp)) {
- u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
-
- printk(KERN_INFO "%s: %s at 0x%lx, "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
- "XID %08x IRQ %d\n",
- dev->name,
- rtl_chip_info[tp->chipset].name,
- dev->base_addr,
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
- }
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_info[tp->chipset].name,
+ dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
rtl8169_init_phy(dev, tp);
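The consolidated probe message above relies on the kernel's %pM printk extension rather than six separate %2.2x arguments; a small hedged illustration with a made-up address:

u8 mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };	/* made-up address */

printk(KERN_INFO "station address %pM\n", mac);
/* prints: station address 00:1b:21:aa:bb:cc */
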
@@ -3213,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
-err_out_msi_5:
+err_out_msi_4:
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
-err_out_free_res_4:
+err_out_free_res_3:
pci_release_regions(pdev);
-err_out_mwi_3:
+err_out_mwi_2:
pci_clear_mwi(pdev);
-err_out_disable_2:
pci_disable_device(pdev);
err_out_free_dev_1:
free_netdev(dev);
@@ -3245,9 +3226,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
}
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
- struct net_device *dev)
+ unsigned int mtu)
{
- unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (max_frame != 16383)
+		printk(KERN_WARNING PFX "WARNING! Changing the MTU on this "
+ "NIC may lead to frame reception errors!\n");
tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
}
@@ -3259,7 +3244,17 @@ static int rtl8169_open(struct net_device *dev)
int retval = -ENOMEM;
- rtl8169_set_rxbufsize(tp, dev);
+	/*
+	 * Note that we use a magic value here; it's odd, I know.
+	 * It's done because some subset of rtl8169 hardware suffers from
+	 * a problem in which received frames that are longer than
+	 * the size set in the RxMaxSize register return garbage sizes
+	 * when received. To avoid this we need to turn off size filtering,
+	 * which is done by setting a value of 16383 in the RxMaxSize register
+	 * and allocating 16k frames to handle the largest possible rx value;
+	 * that's what the magic math below does.
+	 */
+ rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
/*
	 * Rx and Tx descriptors need 256 bytes alignment.
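For reference, the arithmetic behind the magic value, assuming the usual VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:

/*
 * rtl8169_open() passes 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN = 16361 as the
 * "mtu" argument; rtl8169_set_rxbufsize() then adds the same two constants
 * back, so max_frame is exactly 16383 and rx_buf_sz is raised to that value
 * (RX_BUF_SIZE is smaller).  That matches the 16383 programmed into the
 * RxMaxSize register, which, per the comment above, is what sidesteps the
 * broken length filtering.
 */
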
@@ -3912,7 +3907,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
rtl8169_down(dev);
- rtl8169_set_rxbufsize(tp, dev);
+ rtl8169_set_rxbufsize(tp, dev->mtu);
ret = rtl8169_init_ring(dev);
if (ret < 0)
@@ -4136,10 +4131,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
- if (net_ratelimit() && netif_msg_drv(tp)) {
- printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
- " Rescheduling.\n", dev->name, ret);
- }
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
@@ -4169,10 +4164,8 @@ static void rtl8169_reset_task(struct work_struct *work)
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
} else {
- if (net_ratelimit() && netif_msg_intr(tp)) {
- printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
- dev->name);
- }
+ if (net_ratelimit())
+ netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -4260,11 +4253,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts1;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
- if (netif_msg_drv(tp)) {
- printk(KERN_ERR
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
- }
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop;
}
@@ -4297,7 +4286,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- smp_wmb();
+ wmb();
RTL_W8(TxPoll, NPQ); /* set polling bit */
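The smp_wmb() to wmb() changes in the transmit and poll paths order the descriptor writes against the device's MMIO doorbell rather than only against other CPUs (smp_wmb() reduces to a compiler barrier on non-SMP builds). A minimal sketch of the idiom, reusing names from the hunks above; the exact assignment is illustrative:

/* Make the descriptor update globally visible before the doorbell write
 * that lets the NIC start DMA on it.
 */
txd->opts1 = cpu_to_le32(opts1 | DescOwn);
wmb();
RTL_W8(TxPoll, NPQ);	/* doorbell: tell the chip to poll the TX ring */
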
@@ -4326,11 +4315,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- if (netif_msg_intr(tp)) {
- printk(KERN_ERR
- "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
- dev->name, pci_cmd, pci_status);
- }
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4354,8 +4340,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
- if (netif_msg_intr(tp))
- printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
@@ -4482,11 +4467,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
if (status & DescOwn)
break;
if (unlikely(status & RxRES)) {
- if (netif_msg_rx_err(tp)) {
- printk(KERN_INFO
- "%s: Rx ERROR. status = %08x\n",
- dev->name, status);
- }
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
@@ -4549,8 +4531,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
/*
@@ -4560,8 +4542,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
* after refill ?
	 * - how do other drivers handle this condition (Uh oh...).
*/
- if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -4616,10 +4598,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (likely(napi_schedule_prep(&tp->napi)))
__napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
}
/* We only get a new MSI interrupt when all active irq
@@ -4656,7 +4637,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
* until it does.
*/
tp->intr_mask = 0xffff;
- smp_wmb();
+ wmb();
RTL_W16(IntrMask, tp->intr_event);
}
@@ -4755,27 +4736,22 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- if (netif_msg_link(tp)) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
- }
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- unsigned int i;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
@@ -4794,8 +4770,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = swab32(data);
}
- RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(RxConfig, tmp);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index ede937ee50c7..07eb884ff982 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
+#include <linux/slab.h>
#include <linux/rio_ids.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 20a71749154a..f2e335f0d1b7 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -1293,7 +1294,7 @@ static void rr_dump(struct net_device *dev)
printk("Error code 0x%x\n", readl(&regs->Fail1));
- index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
+ index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
cons = rrpriv->dirty_tx;
printk("TX ring index %i, TX consumer %i\n",
index, cons);
@@ -1688,7 +1689,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-static struct pci_device_id rr_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
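Several drivers in this series (r6040, r8169, rrunner, s2io, sc92031, sfc) switch their ID tables to DEFINE_PCI_DEVICE_TABLE(). A hedged sketch of what that buys; example_pci_tbl and the Realtek ID are placeholders:

/* DEFINE_PCI_DEVICE_TABLE(tbl) expands (roughly) to
 * "const struct pci_device_id tbl[] __devinitconst", so the ID table
 * becomes read-only data that can be discarded along with other
 * __devinit material when hotplug support is compiled out.
 */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8139) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
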
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..92ae8d3de39b 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -79,6 +79,7 @@
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <net/tcp.h>
#include <asm/system.h>
@@ -523,7 +524,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
-static struct pci_device_id s2io_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
@@ -923,8 +924,8 @@ static int init_shared_mem(struct s2io_nic *nic)
tmp_v_addr = mac_control->stats_mem;
mac_control->stats_info = (struct stat_block *)tmp_v_addr;
memset(tmp_v_addr, 0, size);
- DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", dev->name,
- (unsigned long long)tmp_p_addr);
+ DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
+ dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
return SUCCESS;
}
@@ -3421,7 +3422,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
break;
}
} else {
- if (!(val64 & busy_bit)) {
+ if (val64 & busy_bit) {
ret = SUCCESS;
break;
}
@@ -3480,7 +3481,7 @@ static void s2io_reset(struct s2io_nic *sp)
struct swStat *swstats;
DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
- __func__, sp->dev->name);
+ __func__, pci_name(sp->pdev));
/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
@@ -5055,8 +5056,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && dev->mc_count) {
- if (dev->mc_count >
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
@@ -5066,7 +5067,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = dev->mc_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
@@ -5092,8 +5093,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
mac_addr = 0;
@@ -5121,6 +5122,7 @@ static void s2io_set_multicast(struct net_device *dev)
dev->name);
return;
}
+ i++;
}
}
}
@@ -5818,10 +5820,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
}
}
- if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
- memset(nic->product_name, 0, vpd_data[1]);
+ if ((!fail) && (vpd_data[1] < VPD_STRING_LEN))
memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
- }
kfree(vpd_data);
swstats->mem_freed += 256;
}
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 9f83a1197375..abc8eefdd4b6 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -42,7 +42,6 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
#include <linux/errno.h>
#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
@@ -52,6 +51,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
#include <linux/pnp.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/processor.h>
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 564d4d7f855b..9944e5d662c0 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2161,13 +2161,13 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- mclist = dev->mc_list;
- while (mclist && (idx < MAC_ADDR_COUNT)) {
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (idx == MAC_ADDR_COUNT)
+ break;
reg = sbmac_addr2reg(mclist->dmi_addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
- mclist = mclist->next;
}
/*
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f97..d87c4787fffa 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -429,13 +429,13 @@ static void _sc92031_set_mar(struct net_device *dev)
u32 mar0 = 0, mar1 = 0;
if ((dev->flags & IFF_PROMISC) ||
- dev->mc_count > multicast_filter_limit ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
struct dev_mc_list *mc_list;
- for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 crc;
unsigned bit = 0;
@@ -1589,7 +1589,7 @@ out:
return 0;
}
-static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index fe806bd9b95f..374832cca11f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -37,7 +37,6 @@ static const char version[] =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b507cc..649a264d6a81 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -20,6 +20,7 @@
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
+#include <linux/gfp.h>
#include "net_driver.h"
#include "efx.h"
#include "mdio_10g.h"
@@ -741,14 +742,14 @@ static int efx_probe_port(struct efx_nic *efx)
EFX_LOG(efx, "create port\n");
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
goto err;
- if (phy_flash_cfg)
- efx->phy_mode = PHY_MODE_SPECIAL;
-
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
@@ -1602,11 +1603,10 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list = net_dev->mc_list;
+ struct dev_mc_list *mc_list;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
- int i;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
@@ -1615,11 +1615,10 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < net_dev->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net_dev) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
- mc_list = mc_list->next;
}
/* Broadcast packets go through the multicast hash filter.
@@ -1862,6 +1861,7 @@ out:
}
if (disabled) {
+ dev_close(efx->net_dev);
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
@@ -1885,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
- if (efx_reset(efx, efx->reset_pending))
- dev_close(efx->net_dev);
+ (void)efx_reset(efx, efx->reset_pending);
rtnl_unlock();
}
@@ -1940,7 +1939,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
**************************************************************************/
/* PCI device ID table */
-static struct pci_device_id efx_pci_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
@@ -2284,6 +2283,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
+ WARN_ON(rc > 0);
EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac051530..7eff0a615cb3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern void efx_suspend(struct efx_nic *efx);
-extern void efx_resume(struct efx_nic *efx);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c477..d9f9c02a928e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
" setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0, i;
enum efx_loopback_mode mode;
- efx_fill_test(n++, strings, data, &tests->mdio,
- "core", 0, "mdio", NULL);
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
if (name == NULL)
break;
- efx_fill_test(n++, strings, data, &tests->phy[i],
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd26e870..08278e7302b3 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
+#include <linux/slab.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -909,6 +910,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
else
efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -925,6 +928,7 @@ static int falcon_probe_port(struct efx_nic *efx)
static void falcon_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
@@ -1005,7 +1009,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
{ FR_AZ_TX_CFG,
@@ -1316,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
- falcon_probe_board(efx, board_rev);
+ rc = falcon_probe_board(efx, board_rev);
+ if (rc)
+ goto fail2;
kfree(nvconfig);
return 0;
@@ -1727,7 +1733,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
/**************************************************************************
*
- * Revision-dependent attributes used by efx.c
+ * Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index bf0b96af5334..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -29,6 +29,15 @@
#define FALCON_BOARD_SFN4111T 0x51
#define FALCON_BOARD_SFN4112F 0x52
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. */
+#define FALCON_BOARD_TEMP_BIAS 15
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MAX 90
+
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
@@ -548,16 +557,16 @@ fail_hwmon:
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4002_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfn4112f_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
@@ -719,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
},
};
-static const struct falcon_board_type falcon_dummy_board = {
- .init = efx_port_dummy_op_int,
- .init_phy = efx_port_dummy_op_void,
- .fini = efx_port_dummy_op_void,
- .set_id_led = efx_port_dummy_op_set_id_led,
- .monitor = efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -745,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
+ return 0;
} else {
EFX_ERR(efx, "unknown board type %d\n", type_id);
- board->type = &falcon_dummy_board;
+ return -ENODEV;
}
}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f8f079..8ccab2c67a20 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
}
-/* Get status of XAUI link */
-static bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
- if (LOOPBACK_INTERNAL(efx))
- return true;
-
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
- /* If the link is up, then check the phy side of the xaui link */
- if (efx->link_state.up && link_ok)
- if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
- link_ok = efx_mdio_phyxgxs_lane_sync(efx);
-
return link_ok;
}
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
- bool mac_up = falcon_xaui_link_ok(efx);
+ bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
falcon_reset_xaui(efx);
udelay(200);
- mac_up = falcon_xaui_link_ok(efx);
+ mac_up = falcon_xmac_link_ok(efx);
--tries;
}
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
- return !falcon_check_xaui_link_up(efx, 5);
+ return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_mask_status_intr(efx, true);
return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
return;
falcon_mask_status_intr(efx, false);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_mask_status_intr(efx, true);
}
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 683353b904c7..c48669c77414 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
efx_dword_t reg;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = efx_mcdi_poll_reboot(efx);
+ rc = -efx_mcdi_poll_reboot(efx);
if (rc)
goto out;
@@ -142,8 +142,9 @@ static int efx_mcdi_poll(struct efx_nic *efx)
if (spins != 0) {
--spins;
udelay(1);
- } else
- schedule();
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
time = get_seconds();
@@ -803,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+ u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
size_t outlen;
int rc;
@@ -827,7 +828,7 @@ fail:
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+ u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -837,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;
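With the variable-length on-stack buffers replaced by fixed EFX_MCDI_NVRAM_LEN_MAX-sized ones, callers must keep each request within that bound (128 bytes, per the mcdi.h hunk below). A hedged sketch of chunked access under that assumption; example_nvram_write_all is an invented helper, not part of the driver:

static int example_nvram_write_all(struct efx_nic *efx, unsigned int type,
				   const u8 *buf, size_t len)
{
	loff_t offset = 0;
	int rc = 0;

	/* Issue the image in EFX_MCDI_NVRAM_LEN_MAX chunks so each MCDI
	 * request fits the fixed buffer in efx_mcdi_nvram_write().
	 */
	while (len) {
		size_t chunk = min_t(size_t, len, EFX_MCDI_NVRAM_LEN_MAX);

		rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
		if (rc)
			break;
		offset += chunk;
		buf += chunk;
		len -= chunk;
	}
	return rc;
}
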
@@ -894,29 +896,73 @@ fail:
return rc;
}
-int efx_mcdi_handle_assertion(struct efx_nic *efx)
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ return rc;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- union {
- u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 reboot[MC_CMD_REBOOT_IN_LEN];
- } inbuf;
- u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
unsigned int flags, index, ofst;
const char *reason;
size_t outlen;
int retry;
int rc;
- /* Check if the MC is in the assertion handler, retrying twice. Once
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
* because a boot-time assertion might cause this command to fail
* with EINTR. And once again because GET_ASSERTS can race with
* MC_CMD_REBOOT running on the other port. */
retry = 2;
do {
- MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
- assertion, sizeof(assertion), &outlen);
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc)
@@ -924,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EINVAL;
- flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
return 0;
- /* Reset the hardware atomically such that only one port with succeed.
- * This command will succeed if a reboot is no longer required (because
- * the other port did it first), but fail with EIO if it succeeds.
- */
- BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
- MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
-
- /* Print out the assertion */
reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
? "system-level assertion"
: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -947,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
? "watchdog reset"
: "unknown assertion";
EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(assertion, ofst));
+ MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
return 0;
}
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
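The new efx_mcdi_nvram_test_all() above walks the type bitmask returned by
efx_mcdi_nvram_types() bit by bit. A minimal, purely illustrative sketch of the
same walk (example_walk_nvram_types() is a hypothetical name, not part of the
patch, and the sample mask value is made up):

	/* Illustration only: how a NVRAM type bitmask is walked.  A mask of
	 * 0x0a (bits 1 and 3 set) would result in exactly two MC_CMD_NVRAM_TEST
	 * requests, for types 1 and 3. */
	static void example_walk_nvram_types(u32 nvram_types)
	{
		unsigned int type = 0;

		while (nvram_types != 0) {
			if (nvram_types & 1)
				pr_debug("would test NVRAM type %u\n", type);
			type++;
			nvram_types >>= 1;
		}
	}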
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de916728c2e3..f1f89ad4075a 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,10 +111,12 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
+extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
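EFX_MCDI_NVRAM_LEN_MAX bounds the per-request payload, so callers are expected
to split larger transfers into chunks of at most that size; the mtd.c hunk
further down does exactly this. A minimal sketch of the same chunking pattern,
assuming only the declarations above (the surrounding function and variable
names are hypothetical):

	/* Sketch: read 'len' bytes of NVRAM partition 'type' starting at
	 * 'start', in chunks no larger than EFX_MCDI_NVRAM_LEN_MAX (128). */
	static int example_nvram_read_all(struct efx_nic *efx, unsigned int type,
					  loff_t start, u8 *buffer, size_t len)
	{
		loff_t offset = start, end = start + len;
		size_t chunk;
		int rc;

		while (offset < end) {
			chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
			rc = efx_mcdi_nvram_read(efx, type, offset, buffer, chunk);
			if (rc)
				return rc;
			offset += chunk;
			buffer += chunk;
		}
		return 0;
	}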
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..bd59302695b3 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
-#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
-/* MC_CMD_START_PHY_BIST:
+/* MC_CMD_START_BIST:
* Start a BIST test on the PHY.
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
*/
#define MC_CMD_START_BIST 0x25
#define MC_CMD_START_BIST_IN_LEN 4
-#define MC_CMD_START_BIST_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_OUT_LEN 0
-/* Run the PHY's short BIST */
-#define MC_CMD_PHY_BIST_SHORT 1
-/* Run the PHY's long BIST */
-#define MC_CMD_PHY_BIST_LONG 2
+/* Run the PHY's short cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 1
+/* Run the PHY's long cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_LONG 2
/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
#define MC_CMD_BPX_SERDES_BIST 3
+/* Run the MC loopback tests */
+#define MC_CMD_MC_LOOPBACK_BIST 4
+/* Run the PHY's standard BIST */
+#define MC_CMD_PHY_BIST 5
/* MC_CMD_POLL_PHY_BIST: (variadic output)
* Poll for BIST completion
*
- * Returns a single status code, and a binary blob of phy-specific
- * bist output. If the driver can't succesfully parse the BIST output,
- * it should still respect the Pass/Fail in OUT.RESULT.
+ * Returns a single status code, and optionally some PHY specific
+ * bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * Locks required: PHY_LOCK if doing a PHY BIST
+ * If a driver can't successfully parse the BIST output, it should
+ * still respect the Pass/Fail in OUT.RESULT
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
* Return code: 0, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
#define MC_CMD_POLL_BIST_PASSED 2
#define MC_CMD_POLL_BIST_FAILED 3
#define MC_CMD_POLL_BIST_TIMEOUT 4
+/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+/* SFT9001-specific: */
+/* (offset 4 unused?) */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
+/* mrsfp "PHY" driver: */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
/* MC_CMD_PHY_SPI: (variadic in, variadic out)
* Read/Write/Erase the PHY SPI device
@@ -1090,8 +1129,10 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
#define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END 95
/* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_GENERATION_END 96
#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
/* MC_CMD_MAC_STATS:
@@ -1204,6 +1245,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
@@ -1214,7 +1262,8 @@
#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
#define MC_CMD_WOL_TYPE_BITMAP 0x5
-#define MC_CMD_WOL_TYPE_MAX 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_FILTER_MODE_SIMPLE 0x0
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1355,14 +1404,24 @@
* Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
/* MC_CMD_REBOOT:
- * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
- * when the driver notices an assertion failure, to allow two ports to
- * both recover (semi-)gracefully.
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices
+ * an assertion failure (at which point it is expected to perform a complete
+ * tear down and reinitialise), to allow both ports to reset the MC once
+ * in an atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares
+ * with REBOOT_ON_ASSERT=0.
*
* Locks required: NONE
* Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1467,11 +1526,10 @@
((_ofst) + 6)
/* MC_CMD_READ_SENSORS
- * Returns the current (value, state) for each sensor
+ * Returns the current reading from each sensor
*
- * Returns the current (value, state) [each 16bit] of each sensor supported by
- * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
- * memory.
+ * Returns a sparse array of sensor readings (indexed by the sensor
+ * type) into host memory. Each array element is a dword.
*
* The MC will send a SENSOREVT event every time any sensor changes state. The
* driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1484,6 +1542,12 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* Sensor reading fields */
+#define MC_CMD_READ_SENSOR_VALUE_LBN 0
+#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
+#define MC_CMD_READ_SENSOR_STATE_LBN 16
+#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
+
/* MC_CMD_GET_PHY_STATE:
* Report current state of PHY. A "zombie" PHY is a PHY that has failed to
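The new MC_CMD_READ_SENSOR_* fields above describe how each dword in the DMA'd
sensor array is laid out: a 16-bit value in the low half and an 8-bit state
above it. A hedged sketch of decoding one entry (this decoder is illustrative
and not added by the patch; it only uses the constants defined above):

	static void example_decode_sensor(u32 entry)
	{
		unsigned int value, state;

		value = (entry >> MC_CMD_READ_SENSOR_VALUE_LBN) &
			((1 << MC_CMD_READ_SENSOR_VALUE_WIDTH) - 1);
		state = (entry >> MC_CMD_READ_SENSOR_STATE_LBN) &
			((1 << MC_CMD_READ_SENSOR_STATE_WIDTH) - 1);

		pr_debug("sensor value %u, state %u\n", value, state);
	}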
@@ -1575,4 +1639,98 @@
#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+/* MC_CMD_TEST_ASSERT:
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully).
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_TESTASSERT_IN_LEN 0
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND 0x4a
+ *
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it
+ * doesn't understand the given workaround number - which should not
+ * be treated as a hard error by client code.
+ *
+ * This op does not imply any semantics about each workaround, that's between
+ * the driver and the mcfw on a per-workaround basis.
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 1
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
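Since an unknown workaround number yields EINVAL rather than a hard error, a
caller is expected to swallow that particular failure. A minimal sketch of how
a driver might enable BUG17230 under these definitions (the helper below is
hypothetical, not part of this patch; it assumes the MCDI_SET_DWORD and
efx_mcdi_rpc helpers used elsewhere in this series):

	static int example_enable_bug17230(struct efx_nic *efx)
	{
		u8 inbuf[MC_CMD_WORKAROUND_IN_LEN];
		int rc;

		MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
		MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);

		rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		/* The firmware returns EINVAL for a workaround number it does
		 * not recognise; per the comment above, treat that as benign. */
		if (rc == -EINVAL)
			rc = 0;
		return rc;
	}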
+/* MC_CMD_GET_PHY_MEDIA_INFO:
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs).
+ *
+ * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
+ * the valid "page number" input values, and the output data, are interpreted
+ * on a per-type basis.
+ *
+ * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
+ * 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+
+/* MC_CMD_NVRAM_TEST:
+ * Test a particular NVRAM partition for valid contents (where "valid"
+ * depends on the type of partition).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0
+#define MC_CMD_NVRAM_TEST_FAIL 1
+#define MC_CMD_NVRAM_TEST_NOTSUPP 2
+
+/* MC_CMD_MRSFP_TWEAK: (debug)
+ * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first.
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+
+/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
+ * used for post-3.0 extensions. If you run out of space, look for gaps or
+ * commands that are unused in the existing range. */
+
#endif /* MCDI_PCOL_H */
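To make the GET_PHY_MEDIA_INFO layout above concrete, here is a hedged sketch
of fetching one SFP+ module page (the function is hypothetical and not added by
this patch; it combines the offsets defined above with the efx_mcdi_rpc helper
used throughout this series, and assumes the 128-byte page size described in
the command comment):

	static int example_read_sfp_page(struct efx_nic *efx, u32 page,
					 u8 *data, size_t *data_len)
	{
		u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
		u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128)];
		size_t outlen;
		int rc;

		MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST)
			return -EIO;

		*data_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
		memcpy(data, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
		       min_t(size_t, *data_len, 128));
		return 0;
	}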
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5a0d52..2f2354696663 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -11,6 +11,7 @@
* Driver for PHY related operations via MCDI.
*/
+#include <linux/slab.h>
#include "efx.h"
#include "phy.h"
#include "mcdi.h"
@@ -304,31 +305,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg;
+ struct efx_mcdi_phy_cfg *phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ u32 caps;
int rc;
- /* TODO: Move phy_data initialisation to
- * phy_op->probe/remove, rather than init/fini */
- phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
- if (phy_cfg == NULL) {
- rc = -ENOMEM;
- goto fail_alloc;
- }
- rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
- efx->phy_type = phy_cfg->type;
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
- efx->mdio_bus = phy_cfg->channel;
- efx->mdio.prtad = phy_cfg->port;
- efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
- if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,45 +382,17 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
- kfree(phy_cfg);
-
- return 0;
-
-fail:
- kfree(phy_cfg);
-fail_alloc:
- return rc;
-}
-
-static int efx_mcdi_phy_init(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_cfg *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- u32 caps;
- int rc;
-
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- efx->phy_data = phy_data;
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
- else
- phy_data->forced_cap = caps;
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
return 0;
@@ -460,7 +449,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
/* The link partner capabilities are only relevent if the
* link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
@@ -504,7 +493,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void efx_mcdi_phy_fini(struct efx_nic *efx)
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -584,14 +573,37 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
return 0;
}
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EMSGSIZE;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
- .init = efx_mcdi_phy_init,
+ .init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
- .fini = efx_mcdi_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
.run_tests = NULL,
.test_name = NULL,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f0594..0548fcbbdcd0 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
+ efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339d..f89e71929603 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
+/* Liveness self-test for MDIO PHYs */
+extern int efx_mdio_test_alive(struct efx_nic *efx);
+
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a464529a46b..f3ac7f30b5e7 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/rtnetlink.h>
#define EFX_DRIVER_NAME "sfc_mtd"
@@ -23,7 +24,6 @@
#include "mcdi_pcol.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
-#define EFX_MCDI_CHUNK_LEN 128
struct efx_mtd_partition {
struct mtd_info mtd;
@@ -428,7 +428,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
int rc = 0;
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
@@ -491,7 +491,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
}
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f009b7..cb018e272097 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
* Special buffers are used for the event queues and the TX and RX
* descriptor queues for each channel. They are *not* used for the
* actual transmit and receive buffers.
- *
- * Note that for Falcon, TX and RX descriptor queues live in host memory.
- * Allocation and freeing procedures must take this into account.
*/
struct efx_special_buffer {
void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
*
- * Falcon uses these buffers for its interrupt status registers and
+ * The NIC uses these buffers for its interrupt status registers and
* MAC stats dumps.
*/
struct efx_buffer {
@@ -516,14 +512,16 @@ struct efx_mac_operations {
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
* @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate.
+ * @run_tests: Run tests and record results as appropriate (offline).
* Flags are the ethtool tests flags.
*/
struct efx_phy_operations {
int (*probe) (struct efx_nic *efx);
int (*init) (struct efx_nic *efx);
void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
void (*get_settings) (struct efx_nic *efx,
@@ -531,6 +529,7 @@ struct efx_phy_operations {
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
};
@@ -671,7 +670,7 @@ union efx_multicast_hash {
* @irq_status: Interrupt status buffer
* @last_irq_cpu: Last CPU to handle interrupt.
* This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by falcon_test_interrupt()
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
@@ -720,8 +719,7 @@ union efx_multicast_hash {
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
*
- * The @priv field of the corresponding &struct net_device points to
- * this.
+ * This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
char name[IFNAMSIZ];
@@ -994,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
* that the net driver will program into the MAC as the maximum frame
* length.
*
- * The 10G MAC used in Falcon requires 8-byte alignment on the frame
+ * The 10G MAC requires 8-byte alignment on the frame
* length, so we round up to the nearest 8.
*
* Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be227862..b06f8e348307 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
- *
- * Note that EVQ_RPTR_REG contains the index of the "last read" event,
- * whereas channel->eventq_read_ptr contains the index of the "next to
- * read" event.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
+ * because this might be a shared interrupt, but we do need to
+ * check the channel every time and preemptively rearm it if
+ * it's idle. */
+ efx_for_each_channel(channel, efx) {
+ if (!channel->work_pending)
+ efx_nic_eventq_read_ack(channel);
+ }
}
return result;
@@ -1576,6 +1581,8 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..3166bafdfbef 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
**************************************************************************
*/
-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc791b2f..e077bef08a50 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -10,6 +10,7 @@
* Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
*/
+#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include "efx.h"
@@ -33,6 +34,9 @@
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +56,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
-static int qt2025c_wait_reset(struct efx_nic *efx)
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
- unsigned long timeout = jiffies + 10 * HZ;
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
@@ -74,11 +88,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
old_counter = counter;
else if (counter != old_counter)
break;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ EFX_ERR(efx, "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
- msleep(10);
+ }
+ msleep(QT2025C_HEARTB_WAIT);
}
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +118,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ EFX_ERR(efx, "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
}
return 0;
@@ -120,15 +319,9 @@ static int qt202x_reset_phy(struct efx_nic *efx)
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
- /* Check that all the MMDs we expect are present and responding. We
- * expect faults on some if the link is down, but not on the PHY XS */
- rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
- if (rc < 0)
- goto fail;
-
falcon_board(efx)->type->init_phy(efx);
- return rc;
+ return 0;
fail:
EFX_ERR(efx, "PHY reset timed out\n");
@@ -137,6 +330,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_phy_probe(struct efx_nic *efx)
{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +348,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_init(struct efx_nic *efx)
{
- struct qt202x_phy_data *phy_data;
u32 devid;
int rc;
@@ -155,17 +357,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
- phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
-
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
- phy_data->phy_mode = efx->phy_mode;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
return 0;
}
@@ -183,6 +382,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
return efx->link_state.up != was_up;
}
@@ -191,6 +393,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
@@ -224,7 +430,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-static void qt202x_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
@@ -236,7 +442,9 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
- .fini = qt202x_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
};
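The packed firmware_ver value built by qt2025c_firmware_id() is easiest to
verify with a worked example (the byte values below are illustrative):

	/* Example: firmware "2.0.1.0" would be reported as
	 *   firmware_id[3] = 0x20, firmware_id[4] = 0x01, firmware_id[5] = 0x00
	 * so the packing above gives
	 *   ((0x20 & 0xf0) << 20) | ((0x20 & 0x0f) << 16) | (0x01 << 8) | 0x00
	 *     = 0x02000000 | 0x00000000 | 0x00000100 | 0x00
	 *     = 0x02000100
	 * which matches the 0x02000100 cut-off used by qt2025c_select_phy_mode()
	 * to decide whether SFP+ Self-Configure mode is available. */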
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 89d606fe9248..18a3be428348 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -95,7 +95,7 @@
#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
-/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
#define FR_BZ_INT_ISR0 0x00000090
#define FRF_BZ_INT_ISR_REG_LBN 0
#define FRF_BZ_INT_ISR_REG_WIDTH 64
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index a97c923b560c..e308818b9f55 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -10,6 +10,7 @@
#include <linux/socket.h>
#include <linux/in.h>
+#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index af3933579790..0106b1d9aae2 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -18,15 +18,13 @@
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
-#include "spi.h"
-#include "io.h"
-#include "mdio_10g.h"
/*
* Loopback test packet structure
@@ -76,38 +74,15 @@ struct efx_loopback_state {
*
**************************************************************************/
-static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad = __ffs(efx->mdio.mmds);
- u16 physid1, physid2;
- if (efx->phy_type == PHY_TYPE_NONE)
- return 0;
-
- mutex_lock(&efx->mac_lock);
- tests->mdio = -1;
-
- physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
- physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
-
- if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
- (physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
- rc = -EINVAL;
- goto out;
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
}
- if (EFX_IS10G(efx)) {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
- if (rc)
- goto out;
- }
-
-out:
- mutex_unlock(&efx->mac_lock);
- tests->mdio = rc ? -1 : 1;
return rc;
}
@@ -254,7 +229,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
return 0;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy, flags);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -680,7 +655,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
- rc = efx_test_mdio(efx, tests);
+ rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96b..643bef72b99d 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
*/
struct efx_self_tests {
/* online tests */
- int mdio;
+ int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
- int phy[EFX_MAX_PHY_TESTS];
+ int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f031b2..e0c46f59d1f8 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -106,16 +107,11 @@ static int siena_probe_port(struct efx_nic *efx)
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
- /* Fill out MDIO structure and loopback modes */
+ /* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
- /* Initial assumption */
- efx->link_state.speed = 10000;
- efx->link_state.fd = true;
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
-
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -133,12 +129,13 @@ static int siena_probe_port(struct efx_nic *efx)
void siena_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
@@ -180,6 +177,12 @@ static int siena_test_registers(struct efx_nic *efx)
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
@@ -453,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
static void siena_update_nic_stats(struct efx_nic *efx)
{
- while (siena_try_update_nic_stats(efx) == -EAGAIN)
- cpu_relax();
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ return;
+ udelay(100);
+ }
+
+ /* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
@@ -581,6 +593,7 @@ struct efx_nic_type siena_a0_nic_type = {
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572a49a9..f21efe7bd316 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "nic.h"
@@ -202,10 +203,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
int rc;
rtnl_lock();
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
- MDIO_PMA_10GBT_TXPWR_SHORT,
- count != 0 && *buf != '0');
- rc = efx_reconfigure_port(efx);
+ if (efx->state != STATE_RUNNING) {
+ rc = -EBUSY;
+ } else {
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+ MDIO_PMA_10GBT_TXPWR_SHORT,
+ count != 0 && *buf != '0');
+ rc = efx_reconfigure_port(efx);
+ }
rtnl_unlock();
return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +303,62 @@ static int tenxpress_init(struct efx_nic *efx)
return 0;
}
-static int sfx7101_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct efx_nic *efx)
{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
- return 0;
-}
+ struct tenxpress_phy_data *phy_data;
+ int rc;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ /* Create any special files */
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_phy_short_reach);
+ if (rc)
+ goto fail;
+ }
+
+ if (efx->phy_type == PHY_TYPE_SFX7101) {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+ } else {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = (SFT9001_LOOPBACKS |
+ FALCON_XMAC_LOOPBACKS |
+ FALCON_GMAC_LOOPBACKS);
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full);
+ }
-static int sft9001_phy_probe(struct efx_nic *efx)
-{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
- FALCON_GMAC_LOOPBACKS);
return 0;
+
+fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
- struct tenxpress_phy_data *phy_data;
- int rc = 0;
+ int rc;
falcon_board(efx)->type->init_phy(efx);
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
- phy_data->phy_mode = efx->phy_mode;
-
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
@@ -341,44 +372,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- goto fail;
+ return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
- goto fail;
+ return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
- goto fail;
+ return rc;
- /* Initialise advertising flags */
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full);
- if (efx->phy_type != PHY_TYPE_SFX7101)
- efx->link_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full);
+ /* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- rc = device_create_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
- if (rc)
- goto fail;
- }
-
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
-
- fail:
- kfree(efx->phy_data);
- efx->phy_data = NULL;
- return rc;
}
/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +603,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void tenxpress_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- /* Power down the LNPGA */
- reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
-
- /* Waiting here ensures that the board fini, which can turn
- * off the power to the PHY, won't get run until the LNPGA
- * powerdown has been given long enough to complete. */
- schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
- }
-
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@@ -819,27 +834,31 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
- .probe = sfx7101_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
struct efx_phy_operations falcon_sft9001_phy_ops = {
- .probe = sft9001_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sft9001_test_name,
.run_tests = sft9001_run_tests,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94e821b..be0e110a1f73 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -13,6 +13,7 @@
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
+#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
@@ -821,8 +822,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
EFX_TXQ_MASK];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
- buffer->len = 0;
- buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
@@ -836,6 +835,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
+ buffer->len = 0;
+ buffer->continuation = true;
}
}
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6b364a6c6c60..c8fc896fc460 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -592,8 +593,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup... */
len = skb->len;
if (len < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
+ if (skb_padto(skb, ETH_ZLEN)) {
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
return NETDEV_TX_OK;
+ }
len = ETH_ZLEN;
}
@@ -660,7 +663,7 @@ static void sgiseeq_set_multicast(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ca6285016dfd..6242b85d5d15 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -31,6 +31,7 @@
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <asm/cacheflush.h>
#include "sh_eth.h"
@@ -110,7 +111,7 @@ static void sh_eth_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0)
+ if (cnt == 0)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
@@ -1473,13 +1474,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
- /* pritnt device infomation */
- pr_info("Base address at 0x%x, ",
- (u32)ndev->base_addr);
-
- for (i = 0; i < 5; i++)
- printk("%02X:", ndev->dev_addr[i]);
- printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ /* print device information */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a0..b30ce752bbf3 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -17,7 +17,9 @@
See the file COPYING in this distribution for more information.
- */
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -30,27 +32,16 @@
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/irq.h>
-#define net_drv(p, arg...) if (netif_msg_drv(p)) \
- printk(arg)
-#define net_probe(p, arg...) if (netif_msg_probe(p)) \
- printk(arg)
-#define net_link(p, arg...) if (netif_msg_link(p)) \
- printk(arg)
-#define net_intr(p, arg...) if (netif_msg_intr(p)) \
- printk(arg)
-#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
- printk(arg)
-
#define PHY_MAX_ADDR 32
#define PHY_ID_ANY 0x1f
#define MII_REG_ANY 0x1f
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define DRV_NAME "sis190"
#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
-#define PFX DRV_NAME ": "
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
@@ -294,6 +285,12 @@ struct sis190_private {
struct mii_if_info mii_if;
struct list_head first_phy;
u32 features;
+ u32 negotiated_lpa;
+ enum {
+ LNK_OFF,
+ LNK_ON,
+ LNK_AUTONEG,
+ } link_status;
};
struct sis190_phy {
@@ -334,7 +331,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
@@ -381,7 +378,7 @@ static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
}
if (i > 99)
- printk(KERN_ERR PFX "PHY command failed !\n");
+ pr_err("PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
@@ -493,18 +490,24 @@ static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
{
u32 rx_buf_sz = tp->rx_buf_sz;
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
- if (likely(skb)) {
- dma_addr_t mapping;
-
- mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- sis190_map_to_asic(desc, mapping, rx_buf_sz);
- } else
- sis190_make_unusable_by_asic(desc);
+ if (unlikely(!skb))
+ goto skb_alloc_failed;
+ mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping))
+ goto out;
+ sis190_map_to_asic(desc, mapping, rx_buf_sz);
return skb;
+
+out:
+ dev_kfree_skb_any(skb);
+skb_alloc_failed:
+ sis190_make_unusable_by_asic(desc);
+ return NULL;
}
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -589,8 +592,7 @@ static int sis190_rx_interrupt(struct net_device *dev,
status = le32_to_cpu(desc->PSize);
- // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
- // status);
+ //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
if (sis190_rx_pkt_err(status, stats) < 0)
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -601,9 +603,8 @@ static int sis190_rx_interrupt(struct net_device *dev,
struct pci_dev *pdev = tp->pci_dev;
if (unlikely(pkt_size > tp->rx_buf_sz)) {
- net_intr(tp, KERN_INFO
- "%s: (frag) status = %08x.\n",
- dev->name, status);
+ netif_info(tp, intr, dev,
+ "(frag) status = %08x\n", status);
stats->rx_dropped++;
stats->rx_length_errors++;
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -637,12 +638,12 @@ static int sis190_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
- if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+ if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -751,10 +752,11 @@ static irqreturn_t sis190_interrupt(int irq, void *__dev)
SIS_W32(IntrStatus, status);
- // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+// netif_info(tp, intr, dev, "status = %08x\n", status);
if (status & LinkChange) {
- net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+ netif_info(tp, intr, dev, "link change\n");
+ del_timer(&tp->timer);
schedule_work(&tp->phy_task);
}
@@ -841,19 +843,17 @@ static void sis190_set_rx_mode(struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- unsigned int i;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -929,13 +929,15 @@ static void sis190_phy_task(struct work_struct *work)
if (val & BMCR_RESET) {
// FIXME: needlessly high ? -- FR 02/07/2005
mod_timer(&tp->timer, jiffies + HZ/10);
- } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
- BMSR_ANEGCOMPLETE)) {
+ goto out_unlock;
+ }
+
+ val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+ if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
netif_carrier_off(dev);
- net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
- dev->name);
- mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
- } else {
+ netif_warn(tp, link, dev, "auto-negotiating...\n");
+ tp->link_status = LNK_AUTONEG;
+ } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
/* Rejoice ! */
struct {
int val;
@@ -959,13 +961,13 @@ static void sis190_phy_task(struct work_struct *work)
u16 adv, autoexp, gigadv, gigrec;
val = mdio_read(ioaddr, phy_id, 0x1f);
- net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+ netif_info(tp, link, dev, "mii ext = %04x\n", val);
val = mdio_read(ioaddr, phy_id, MII_LPA);
adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
- net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
- dev->name, val, adv, autoexp);
+ netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
+ val, adv, autoexp);
if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
/* check for gigabit speed */
@@ -1004,10 +1006,14 @@ static void sis190_phy_task(struct work_struct *work)
SIS_W32(RGDelay, 0x0440);
}
- net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
- p->msg);
+ tp->negotiated_lpa = p->val;
+
+ netif_info(tp, link, dev, "link on %s mode\n", p->msg);
netif_carrier_on(dev);
- }
+ tp->link_status = LNK_ON;
+ } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
+ tp->link_status = LNK_OFF;
+ mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
out_unlock:
rtnl_unlock();
@@ -1191,13 +1197,17 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
netif_stop_queue(dev);
- net_tx_err(tp, KERN_ERR PFX
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netif_err(tp, tx_err, dev,
+ "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
+ netif_err(tp, tx_err, dev,
+ "PCI mapping failed, dropping packet");
+ return NETDEV_TX_BUSY;
+ }
tp->Tx_skbuff[entry] = skb;
@@ -1211,6 +1221,12 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
wmb();
desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
+ /* Half Duplex */
+ desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
+ desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
+ }
tp->cur_tx++;
@@ -1287,9 +1303,9 @@ static u16 sis190_default_phy(struct net_device *dev)
if (mii_if->phy_id != phy_default->phy_id) {
mii_if->phy_id = phy_default->phy_id;
- net_probe(tp, KERN_INFO
- "%s: Using transceiver at address %d as default.\n",
- pci_name(tp->pci_dev), mii_if->phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Using transceiver at address %d as default\n",
+ pci_name(tp->pci_dev), mii_if->phy_id);
}
status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
@@ -1327,14 +1343,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
LAN : HOME) : p->type;
tp->features |= p->feature;
- net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
- pci_name(tp->pci_dev), p->name, phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: %s transceiver at address %d\n",
+ pci_name(tp->pci_dev), p->name, phy_id);
} else {
phy->type = UNKNOWN;
- net_probe(tp, KERN_INFO
- "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
- pci_name(tp->pci_dev),
- phy->id[0], (phy->id[1] & 0xfff0), phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
+ pci_name(tp->pci_dev),
+ phy->id[0], (phy->id[1] & 0xfff0), phy_id);
}
}
@@ -1398,8 +1415,9 @@ static int __devinit sis190_mii_probe(struct net_device *dev)
}
if (list_empty(&tp->first_phy)) {
- net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
- pci_name(tp->pci_dev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: No MII transceivers found!\n",
+ pci_name(tp->pci_dev));
rc = -EIO;
goto out;
}
@@ -1445,7 +1463,8 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
- net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+ if (netif_msg_drv(&debug))
+ pr_err("unable to alloc new ethernet\n");
rc = -ENOMEM;
goto err_out_0;
}
@@ -1458,34 +1477,39 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
rc = pci_enable_device(pdev);
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: enable failure\n", pci_name(pdev));
goto err_free_dev_1;
}
rc = -ENODEV;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: region #0 is no MMIO resource\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
- net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: invalid PCI region size(s)\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_request_regions(pdev, DRV_NAME);
if (rc < 0) {
- net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: could not request regions\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: DMA configuration failed\n",
+ pci_name(pdev));
goto err_free_res_3;
}
@@ -1493,14 +1517,16 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
if (!ioaddr) {
- net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: cannot remap MMIO, aborting\n",
+ pci_name(pdev));
rc = -EIO;
goto err_free_res_3;
}
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
+ tp->link_status = LNK_OFF;
sis190_irq_mask_and_ack(ioaddr);
@@ -1530,9 +1556,8 @@ static void sis190_tx_timeout(struct net_device *dev)
if (tmp8 & CmdTxEnb)
SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
-
- net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
- dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+ netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
+ SIS_R32(TxControl), SIS_R32(TxSts));
/* Disable interrupts by clearing the interrupt mask. */
SIS_W32(IntrMask, 0x0000);
@@ -1561,15 +1586,16 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
u16 sig;
int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
/* Check to see if there is a sane EEPROM */
sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
if ((sig == 0xffff) || (sig == 0x0000)) {
- net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
- pci_name(pdev), sig);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Error EEPROM read %x\n",
+ pci_name(pdev), sig);
return -EIO;
}
@@ -1603,8 +1629,8 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
u8 reg, tmp8;
unsigned int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
for (i = 0; i < ARRAY_SIZE(ids); i++) {
isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
@@ -1613,8 +1639,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
}
if (!isa_bridge) {
- net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Can not find ISA bridge\n",
+ pci_name(pdev));
return -EIO;
}
@@ -1695,7 +1722,7 @@ static void sis190_set_speed_auto(struct net_device *dev)
int phy_id = tp->mii_if.phy_id;
int val;
- net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+ netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
@@ -1822,7 +1849,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
int rc;
if (!printed_version) {
- net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+ if (netif_msg_drv(&debug))
+ pr_info(SIS190_DRIVER_NAME " loaded\n");
printed_version = 1;
}
@@ -1862,12 +1890,14 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
if (rc < 0)
goto err_remove_mii;
- net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
- pci_name(pdev), sis_chip_info[ent->driver_data].name,
- ioaddr, dev->irq, dev->dev_addr);
-
- net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
- (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ if (netif_msg_probe(tp)) {
+ netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
+ pci_name(pdev),
+ sis_chip_info[ent->driver_data].name,
+ ioaddr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s mode.\n",
+ (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ }
netif_carrier_off(dev);
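
The sis190 changes above add pci_dma_mapping_error() checks after each pci_map_single() and unwind through goto labels on failure. A hedged sketch of that receive-buffer pattern; the foo_* names and the descriptor handling mentioned in the comments are placeholders, not the driver's real symbols:

/*
 * Hedged sketch of DMA-mapping error handling on the RX allocation path:
 * check the mapping, free the skb on failure, and let the caller mark the
 * descriptor unusable.
 */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static struct sk_buff *foo_alloc_rx_skb(struct net_device *dev,
					struct pci_dev *pdev,
					unsigned int buf_sz,
					dma_addr_t *mapping)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, buf_sz);
	if (unlikely(!skb))
		goto err_alloc;

	*mapping = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping))
		goto err_map;

	return skb;		/* caller hands skb + mapping to the asic */

err_map:
	dev_kfree_skb_any(skb);
err_alloc:
	return NULL;		/* caller marks the descriptor unusable */
}
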
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75e..cc0c731c4f09 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
"SiS 900 PCI Fast Ethernet",
"SiS 7016 PCI Fast Ethernet"
};
-static struct pci_device_id sis900_pci_tbl [] = {
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
@@ -2288,7 +2288,7 @@ static void set_rx_mode(struct net_device *net_dev)
rx_mode = RFPromiscuous;
for (i = 0; i < table_entries; i++)
mc_filter[i] = 0xffff;
- } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
(net_dev->flags & IFF_ALLMULTI)) {
/* too many multicast addresses or accept all multicast packet */
rx_mode = RFAAB | RFAAM;
@@ -2300,9 +2300,8 @@ static void set_rx_mode(struct net_device *net_dev)
* packets */
struct dev_mc_list *mclist;
rx_mode = RFAAB;
- for (i = 0, mclist = net_dev->mc_list;
- mclist && i < net_dev->mc_count;
- i++, mclist = mclist->next) {
+
+ netdev_for_each_mc_addr(mclist, net_dev) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
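
The sis900 and sis190 hunks above replace open-coded dev->mc_list/mc_count walks with netdev_mc_count(), netdev_mc_empty() and netdev_for_each_mc_addr(). A sketch of a filter builder using those helpers, assuming the dev_mc_list-based iterator of this kernel generation; the hash layout and the limit constant are hypothetical:

/*
 * Hedged sketch of the multicast-list conversion: count and iterate with the
 * netdev_mc_* helpers instead of touching dev->mc_list directly.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define FOO_MC_FILTER_LIMIT	32	/* hypothetical hardware limit */

static void foo_build_mc_filter(struct net_device *dev, u32 mc_filter[2])
{
	struct dev_mc_list *mclist;

	mc_filter[0] = mc_filter[1] = 0;

	if ((dev->flags & IFF_ALLMULTI) ||
	    netdev_mc_count(dev) > FOO_MC_FILTER_LIMIT) {
		/* Too many entries to filter exactly: accept all multicast. */
		mc_filter[0] = mc_filter[1] = 0xffffffff;
		return;
	}

	netdev_for_each_mc_addr(mclist, dev) {
		int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;

		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}
}
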
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index a85efcfd9d0e..e8387d25f24a 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -557,7 +557,7 @@ static void ess_send_alc_req(struct s_smc *smc)
/*
* send never allocation request where the requested payload and
- * overhead is zero or deallocate bandwidht when no bandwidth is
+ * overhead is zero or deallocate bandwidth when no bandwidth is
* parsed
*/
if (!smc->mib.fddiESSPayload) {
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a728503..d9016b75abc2 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -78,13 +78,13 @@ static const char * const boot_msg =
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
-static struct pci_device_id skfddi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
- smc->hw.fddi_canon_addr.a[0],
- smc->hw.fddi_canon_addr.a[1],
- smc->hw.fddi_canon_addr.a[2],
- smc->hw.fddi_canon_addr.a[3],
- smc->hw.fddi_canon_addr.a[4],
- smc->hw.fddi_canon_addr.a[5]);
+ pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -858,8 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
- int i;
+ struct dev_mc_list *dmi;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -878,29 +871,19 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
- } else if (dev->mc_count > 0) {
- if (dev->mc_count <= FPMAX_MULTICAST) {
+ } else if (!netdev_mc_empty(dev)) {
+ if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
- dmi = dev->mc_list;
-
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
- pr_debug(" %02x %02x %02x ",
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2]);
- pr_debug("%02x %02x %02x\n",
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
- dmi = dmi->next;
- } // for
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
+ dmi->dmi_addr);
+ }
} else { // more MC addresses than HW supports
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc00163..50eb70609f20 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -40,13 +42,13 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
+#include <linux/slab.h>
#include <asm/irq.h>
#include "skge.h"
#define DRV_NAME "skge"
#define DRV_VERSION "1.13"
-#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
@@ -70,15 +72,15 @@ MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static const u32 default_msg
- = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
- | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
static int debug = -1; /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static const struct pci_device_id skge_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
@@ -187,8 +189,8 @@ static void skge_wol_init(struct skge_port *skge)
/* Force to 10/100 skge_reset will re-enable on resume */
gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
- PHY_AN_100FULL | PHY_AN_100HALF |
- PHY_AN_10FULL | PHY_AN_10HALF| PHY_AN_CSMA);
+ (PHY_AN_100FULL | PHY_AN_100HALF |
+ PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
/* no 1000 HD/FD */
gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
gm_phy_write(hw, port, PHY_MARV_CTRL,
@@ -257,25 +259,28 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
u32 supported;
if (hw->copper) {
- supported = SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg| SUPPORTED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
if (hw->chip_id == CHIP_ID_GENESIS)
- supported &= ~(SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full);
+ supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
else if (hw->chip_id == CHIP_ID_YUKON)
supported &= ~SUPPORTED_1000baseT_Half;
} else
- supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
- | SUPPORTED_FIBRE | SUPPORTED_Autoneg;
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
return supported;
}
@@ -365,7 +370,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
}
- return (0);
+ return 0;
}
static void skge_get_drvinfo(struct net_device *dev,
@@ -812,7 +817,7 @@ static int skge_get_eeprom_len(struct net_device *dev)
u32 reg2;
pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
- return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+ return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
@@ -1043,7 +1048,7 @@ static int skge_rx_fill(struct net_device *dev)
skb_reserve(skb, NET_IP_ALIGN);
skge_rx_setup(skge, e, skb, skge->rx_buf_size);
- } while ( (e = e->next) != ring->start);
+ } while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
return 0;
@@ -1051,7 +1056,7 @@ static int skge_rx_fill(struct net_device *dev)
static const char *skge_pause(enum pause_status status)
{
- switch(status) {
+ switch (status) {
case FLOW_STAT_NONE:
return "none";
case FLOW_STAT_REM_SEND:
@@ -1074,13 +1079,11 @@ static void skge_link_up(struct skge_port *skge)
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
- if (netif_msg_link(skge)) {
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- skge->netdev->name, skge->speed,
- skge->duplex == DUPLEX_FULL ? "full" : "half",
- skge_pause(skge->flow_status));
- }
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
}
static void skge_link_down(struct skge_port *skge)
@@ -1089,8 +1092,7 @@ static void skge_link_down(struct skge_port *skge)
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
- if (netif_msg_link(skge))
- printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
+ netif_info(skge, link, skge->netdev, "Link is down\n");
}
@@ -1132,8 +1134,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__xm_phy_read(hw, port, reg, &v))
- printk(KERN_WARNING PFX "%s: phy read timed out\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
return v;
}
@@ -1255,8 +1256,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
if (lpa & PHY_B_AN_RF) {
- printk(KERN_NOTICE PFX "%s: remote fault\n",
- dev->name);
+ netdev_notice(dev, "remote fault\n");
return;
}
@@ -1271,8 +1271,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
skge->duplex = DUPLEX_HALF;
break;
default:
- printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
- dev->name);
+ netdev_notice(dev, "duplex mismatch\n");
return;
}
@@ -1327,7 +1326,7 @@ static void bcom_phy_init(struct skge_port *skge)
/* Optimize MDIO transfer by suppressing preamble. */
r = xm_read16(hw, port, XM_MMU_CMD);
r |= XM_MMU_NO_PRE;
- xm_write16(hw, port, XM_MMU_CMD,r);
+ xm_write16(hw, port, XM_MMU_CMD, r);
switch (id1) {
case PHY_BCOM_ID1_C0:
@@ -1464,8 +1463,7 @@ static int xm_check_link(struct net_device *dev)
lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
if (lpa & PHY_B_AN_RF) {
- printk(KERN_NOTICE PFX "%s: remote fault\n",
- dev->name);
+ netdev_notice(dev, "remote fault\n");
return 0;
}
@@ -1480,8 +1478,7 @@ static int xm_check_link(struct net_device *dev)
skge->duplex = DUPLEX_HALF;
break;
default:
- printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
- dev->name);
+ netdev_notice(dev, "duplex mismatch\n");
return 0;
}
@@ -1519,7 +1516,7 @@ static void xm_link_timer(unsigned long arg)
{
struct skge_port *skge = (struct skge_port *) arg;
struct net_device *dev = skge->netdev;
- struct skge_hw *hw = skge->hw;
+ struct skge_hw *hw = skge->hw;
int port = skge->port;
int i;
unsigned long flags;
@@ -1538,7 +1535,7 @@ static void xm_link_timer(unsigned long arg)
goto link_down;
}
- /* Re-enable interrupt to detect link down */
+ /* Re-enable interrupt to detect link down */
if (xm_check_link(dev)) {
u16 msk = xm_read16(hw, port, XM_IMSK);
msk &= ~XM_IS_INP_ASS;
@@ -1569,7 +1566,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
udelay(1);
}
- printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
+ netdev_warn(dev, "genesis reset failed\n");
reset_ok:
/* Unreset the XMAC. */
@@ -1595,7 +1592,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
}
- switch(hw->phy_type) {
+ switch (hw->phy_type) {
case SK_PHY_XMAC:
xm_phy_init(skge);
break;
@@ -1702,7 +1699,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
if (jumbo) {
/* Enable frame flushing if jumbo frames used */
- skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
} else {
/* enable timeout timers if normal frames */
skge_write16(hw, B3_PA_CTRL,
@@ -1717,7 +1714,7 @@ static void genesis_stop(struct skge_port *skge)
unsigned retries = 1000;
u16 cmd;
- /* Disable Tx and Rx */
+ /* Disable Tx and Rx */
cmd = xm_read16(hw, port, XM_MMU_CMD);
cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
xm_write16(hw, port, XM_MMU_CMD, cmd);
@@ -1792,12 +1789,11 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u16 status = xm_read16(hw, port, XM_ISRC);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
- xm_link_down(hw, port);
+ xm_link_down(hw, port);
mod_timer(&skge->link_timer, jiffies + 1);
}
@@ -1831,7 +1827,7 @@ static void genesis_link_up(struct skge_port *skge)
xm_write16(hw, port, XM_MMU_CMD, cmd);
mode = xm_read32(hw, port, XM_MODE);
- if (skge->flow_status== FLOW_STAT_SYMMETRIC ||
+ if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
skge->flow_status == FLOW_STAT_LOC_SEND) {
/*
* Configure Pause Frame Generation
@@ -1898,12 +1894,11 @@ static inline void bcom_phy_intr(struct skge_port *skge)
u16 isrc;
isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
- skge->netdev->name, isrc);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
if (isrc & PHY_B_IS_PSE)
- printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
+ pr_err("%s: uncorrectable pair swap error\n",
hw->dev[port]->name);
/* Workaround BCom Errata:
@@ -1936,8 +1931,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
return 0;
}
- printk(KERN_WARNING PFX "%s: phy write timeout\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
return -EIO;
}
@@ -1965,8 +1959,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__gm_phy_read(hw, port, reg, &v))
- printk(KERN_WARNING PFX "%s: phy read timeout\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
return v;
}
@@ -2298,9 +2291,8 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_FF_OR) {
++dev->stats.rx_fifo_errors;
@@ -2379,9 +2371,8 @@ static void yukon_phy_intr(struct skge_port *skge)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
- skge->netdev->name, istatus, phystat);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
@@ -2441,8 +2432,7 @@ static void yukon_phy_intr(struct skge_port *skge)
}
return;
failed:
- printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
- skge->netdev->name, reason);
+ pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
/* XXX restart autonegotiation? */
}
@@ -2480,7 +2470,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -ENODEV; /* Phy still in reset */
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = hw->phy_addr;
@@ -2571,8 +2561,7 @@ static int skge_up(struct net_device *dev)
if (!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
- if (netif_msg_ifup(skge))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
if (dev->mtu > RX_BUF_SIZE)
skge->rx_buf_size = dev->mtu + ETH_HLEN;
@@ -2670,8 +2659,7 @@ static int skge_down(struct net_device *dev)
if (skge->mem == NULL)
return 0;
- if (netif_msg_ifdown(skge))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
netif_tx_disable(dev);
@@ -2781,7 +2769,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
* does. Looks like hardware is wrong?
*/
if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
- hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+ hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
control = BMU_TCP_CHECK;
else
control = BMU_UDP_CHECK;
@@ -2793,7 +2781,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
control = BMU_CHECK;
if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
- control |= BMU_EOF| BMU_IRQ_EOF;
+ control |= BMU_EOF | BMU_IRQ_EOF;
else {
struct skge_tx_desc *tf = td;
@@ -2825,15 +2813,15 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (unlikely(netif_msg_tx_queued(skge)))
- printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - skge->tx_ring.start, skb->len);
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
smp_wmb();
if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
- pr_debug("%s: transmit queue full\n", dev->name);
+ netdev_dbg(dev, "transmit queue full\n");
netif_stop_queue(dev);
}
@@ -2858,9 +2846,8 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - skge->tx_ring.start);
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n", e - skge->tx_ring.start);
dev_kfree_skb(e->skb);
}
@@ -2885,8 +2872,7 @@ static void skge_tx_timeout(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_timer(skge))
- printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
skge_tx_clean(dev);
@@ -2932,8 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = dev->mc_count;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u32 mode;
u8 filter[8];
@@ -2953,7 +2938,7 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
genesis_add_filter(filter, list->dmi_addr);
}
@@ -2972,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2987,16 +2972,15 @@ static void yukon_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
yukon_add_filter(filter, list->dmi_addr);
}
@@ -3054,10 +3038,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
struct sk_buff *skb;
u16 len = control & BMU_BBC;
- if (unlikely(netif_msg_rx_status(skge)))
- printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- dev->name, e - skge->rx_ring.start,
- status, len);
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
if (len > skge->rx_buf_size)
goto error;
@@ -3096,7 +3079,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
pci_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
- prefetch(skb->data);
+ prefetch(skb->data);
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
@@ -3111,10 +3094,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
return skb;
error:
- if (netif_msg_rx_err(skge))
- printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- dev->name, e - skge->rx_ring.start,
- control, status);
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
@@ -3574,8 +3556,7 @@ static int skge_reset(struct skge_hw *hw)
hw->ram_offset = 0x80000;
} else
hw->ram_size = t8 * 512;
- }
- else if (t8 == 0)
+ } else if (t8 == 0)
hw->ram_size = 0x20000;
else
hw->ram_size = t8 * 4096;
@@ -3729,7 +3710,7 @@ static int skge_device_event(struct notifier_block *unused,
goto done;
skge = netdev_priv(dev);
- switch(event) {
+ switch (event) {
case NETDEV_CHANGENAME:
if (skge->debugfs) {
d = debugfs_rename(skge_debug, skge->debugfs,
@@ -3737,7 +3718,7 @@ static int skge_device_event(struct notifier_block *unused,
if (d)
skge->debugfs = d;
else {
- pr_info(PFX "%s: rename failed\n", dev->name);
+ netdev_info(dev, "rename failed\n");
debugfs_remove(skge->debugfs);
}
}
@@ -3755,8 +3736,7 @@ static int skge_device_event(struct notifier_block *unused,
skge_debug, dev,
&skge_debug_fops);
if (!d || IS_ERR(d))
- pr_info(PFX "%s: debugfs create failed\n",
- dev->name);
+ netdev_info(dev, "debugfs create failed\n");
else
skge->debugfs = d;
break;
@@ -3777,7 +3757,7 @@ static __init void skge_debug_init(void)
ent = debugfs_create_dir("skge", NULL);
if (!ent || IS_ERR(ent)) {
- pr_info(PFX "debugfs create directory failed\n");
+ pr_info("debugfs create directory failed\n");
return;
}
@@ -3885,9 +3865,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_probe(skge))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
static int __devinit skge_probe(struct pci_dev *pdev,
@@ -3937,7 +3915,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
err = -ENOMEM;
/* space for skge@pci:0000:04:00.0 */
- hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
if (!hw) {
dev_err(&pdev->dev, "cannot allocate hardware struct\n");
@@ -3960,9 +3938,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if (err)
goto err_out_iounmap;
- printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
- (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
- skge_board_name(hw), hw->chip_rev);
+ pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
+ DRV_VERSION,
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+ skge_board_name(hw), hw->chip_rev);
dev = skge_devinit(hw, 0, using_dac);
if (!dev)
@@ -4032,7 +4011,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
flush_scheduled_work();
- if ((dev1 = hw->dev[1]))
+ dev1 = hw->dev[1];
+ if (dev1)
unregister_netdev(dev1);
dev0 = hw->dev[0];
unregister_netdev(dev0);
@@ -4119,8 +4099,7 @@ static int skge_resume(struct pci_dev *pdev)
err = skge_up(dev);
if (err) {
- printk(KERN_ERR PFX "%s: could not up: %d\n",
- dev->name, err);
+ netdev_err(dev, "could not up: %d\n", err);
dev_close(dev);
goto out;
}
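
The skge conversion above drops the driver-local PFX prefix in favour of a pr_fmt() definition plus the netif_info()/netdev_*() helpers, which fold the netif_msg_*() level checks and the interface-name prefix into a single call. A minimal sketch under those assumptions, with illustrative foo_* names and msg_enable handling:

/*
 * Hedged sketch of the logging conversion: pr_fmt() gives pr_*() the module
 * prefix, netif_info() checks the driver's msg_enable bitmap, and
 * netdev_info() prefixes the interface name automatically.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>

struct foo_port {
	struct net_device *netdev;
	u32 msg_enable;		/* bitmap consulted by the netif_*() macros */
};

static void foo_report_link(struct foo_port *p, bool up)
{
	if (up)
		netif_info(p, link, p->netdev, "Link is up\n");
	else
		netif_info(p, link, p->netdev, "Link is down\n");
}

static void foo_probe_banner(struct net_device *dev)
{
	pr_info("driver loaded\n");		/* "modname: driver loaded" */
	netdev_info(dev, "addr %pM\n", dev->dev_addr);
}
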
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..088c797eb73b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -31,6 +33,7 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/ip.h>
+#include <linux/slab.h>
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
@@ -50,8 +53,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.26"
-#define PFX DRV_NAME " "
+#define DRV_VERSION "1.27"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -251,6 +253,8 @@ static void sky2_power_on(struct sky2_hw *hw)
sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
reg = sky2_read32(hw, B2_GP_IO);
reg |= GLB_GPIO_STAT_RACE_DIS;
@@ -644,6 +648,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
u32 reg1;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
@@ -651,6 +656,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -707,9 +713,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
}
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
/* Force a renegotiation */
@@ -727,7 +735,6 @@ static void sky2_wol_init(struct sky2_port *sky2)
unsigned port = sky2->port;
enum flow_control save_mode;
u16 ctrl;
- u32 reg1;
/* Bring hardware out of reset */
sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -778,14 +785,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
- /* Turn on legacy PCI-Express PME mode */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 |= PCI_Y2_PME_LEGACY;
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
/* block receiver */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
-
}
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -796,29 +800,15 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
hw->chip_rev != CHIP_REV_YU_EX_A0) ||
hw->chip_id >= CHIP_ID_YUKON_FE_P) {
/* Yukon-Extreme B0 and further Extreme devices */
- /* enable Store & Forward mode for TX */
-
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_DIS | TX_STFW_ENA);
-
- else
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_ENA| TX_STFW_ENA);
- } else {
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
- else {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
- (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
-
- /* Can't do offload because of lack of store/forward */
- dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
- }
- }
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -1021,11 +1011,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
{
struct sky2_tx_le *le = sky2->tx_le + *slot;
- struct tx_ring_info *re = sky2->tx_ring + *slot;
*slot = RING_NEXT(*slot, sky2->tx_ring_size);
- re->flags = 0;
- re->skb = NULL;
le->ctrl = 0;
return le;
}
@@ -1064,6 +1051,40 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
return le;
}
+static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
/* Build description to hardware for one receive segment */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
dma_addr_t map, unsigned len)
@@ -1102,18 +1123,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
int i;
re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
- return -EIO;
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
pci_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- re->frag_addr[i] = pci_map_page(pdev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ goto map_page_error;
+ }
return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1172,8 +1214,7 @@ static void sky2_rx_stop(struct sky2_port *sky2)
== sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
goto stopped;
- printk(KERN_WARNING PFX "%s: receiver stop failed\n",
- sky2->netdev->name);
+ netdev_warn(sky2->netdev, "receiver stop failed\n");
stopped:
sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
@@ -1323,8 +1364,32 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
/*
- * Allocate and setup receiver buffer pool.
+ * Setup receiver buffer pool.
* Normal case this ends up creating one list element for skb
* in the receive ring. Worst case if using large MTU and each
* allocation falls on a different 64 bit region, that results
@@ -1332,12 +1397,12 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
* One element is used for checksum enable/disable, and one
* extra to avoid wrap.
*/
-static int sky2_rx_start(struct sky2_port *sky2)
+static void sky2_rx_start(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
struct rx_ring_info *re;
unsigned rxq = rxqaddr[sky2->port];
- unsigned i, size, thresh;
+ unsigned i, thresh;
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
@@ -1358,40 +1423,9 @@ static int sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
- /* Space needed for frame data + headers rounded up */
- size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
-
- /* Stopping point for hardware truncation */
- thresh = (size - 8) / sizeof(u32);
-
- sky2->rx_nfrags = size >> PAGE_SHIFT;
- BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
-
- /* Compute residue after pages */
- size -= sky2->rx_nfrags << PAGE_SHIFT;
-
- /* Optimize to handle small packets and headers */
- if (size < copybreak)
- size = copybreak;
- if (size < ETH_HLEN)
- size = ETH_HLEN;
-
- sky2->rx_data_size = size;
-
- /* Fill Rx ring */
+ /* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
-
- re->skb = sky2_rx_alloc(sky2);
- if (!re->skb)
- goto nomem;
-
- if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
- dev_kfree_skb(re->skb);
- re->skb = NULL;
- goto nomem;
- }
-
sky2_rx_submit(sky2, re);
}
@@ -1401,6 +1435,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
* the register is limited to 9 bits, so if you do frames > 2052
* you better get the MTU right!
*/
+ thresh = sky2_get_rx_threshold(sky2);
if (thresh > 0x1ff)
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
else {
@@ -1432,13 +1467,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
}
-
-
-
- return 0;
-nomem:
- sky2_rx_clean(sky2);
- return -ENOMEM;
}
static int sky2_alloc_buffers(struct sky2_port *sky2)
@@ -1469,7 +1497,7 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
if (!sky2->rx_ring)
goto nomem;
- return 0;
+ return sky2_alloc_rx_skbs(sky2);
nomem:
return -ENOMEM;
}
@@ -1478,6 +1506,8 @@ static void sky2_free_buffers(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
+ sky2_rx_clean(sky2);
+
if (sky2->rx_le) {
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
@@ -1496,16 +1526,16 @@ static void sky2_free_buffers(struct sky2_port *sky2)
sky2->rx_ring = NULL;
}
-/* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static void sky2_hw_up(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask, ramsize;
- int cap, err;
+ u32 ramsize;
+ int cap;
struct net_device *otherdev = hw->dev[sky2->port^1];
+ tx_init(sky2);
+
/*
* On dual port PCI-X card, there is an problem where status
* can be received out of order due to split transactions
@@ -1517,16 +1547,7 @@ static int sky2_up(struct net_device *dev)
cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
cmd &= ~PCI_X_CMD_MAX_SPLIT;
sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
-
- }
-
- netif_carrier_off(dev);
-
- err = sky2_alloc_buffers(sky2);
- if (err)
- goto err_out;
-
- tx_init(sky2);
+ }
sky2_mac_init(hw, port);
@@ -1535,7 +1556,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+ netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1567,18 +1588,33 @@ static int sky2_up(struct net_device *dev)
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
- err = sky2_rx_start(sky2);
+ sky2_rx_start(sky2);
+}
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
if (err)
goto err_out;
+ sky2_hw_up(sky2);
+
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_IMSK);
- if (netif_msg_ifup(sky2))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(sky2, ifup, dev, "enabling interface\n");
return 0;
@@ -1618,8 +1654,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
return count;
}
-static void sky2_tx_unmap(struct pci_dev *pdev,
- const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1629,6 +1664,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
pci_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
+ re->flags = 0;
}
/*
@@ -1661,9 +1697,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
goto mapping_error;
slot = sky2->tx_prod;
- if (unlikely(netif_msg_tx_queued(sky2)))
- printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
- dev->name, slot, skb->len);
+ netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+ "tx queued, slot %u, len %d\n", slot, skb->len);
/* Send high bits if needed */
upper = upper_32_bits(mapping);
@@ -1828,13 +1863,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2_tx_unmap(sky2->hw->pdev, re);
if (skb) {
- if (unlikely(netif_msg_tx_done(sky2)))
- printk(KERN_DEBUG "%s: tx done %u\n",
- dev->name, idx);
+ netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+ "tx done %u\n", idx);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ re->skb = NULL;
dev_kfree_skb_any(skb);
sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1843,9 +1878,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
-
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
- netif_wake_queue(dev);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -1870,21 +1902,11 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
}
-/* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static void sky2_hw_down(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
- u32 imask;
-
- /* Never really got started! */
- if (!sky2->tx_le)
- return 0;
-
- if (netif_msg_ifdown(sky2))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
/* Force flow control off */
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1917,15 +1939,6 @@ static int sky2_down(struct net_device *dev)
sky2_rx_stop(sky2);
- /* Disable port IRQ */
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~portirq_msk[port];
- sky2_write32(hw, B0_IMSK, imask);
- sky2_read32(hw, B0_IMSK);
-
- synchronize_irq(hw->pdev->irq);
- napi_synchronize(&hw->napi);
-
spin_lock_bh(&sky2->phy_lock);
sky2_phy_power_down(hw, port);
spin_unlock_bh(&sky2->phy_lock);
@@ -1934,8 +1947,29 @@ static int sky2_down(struct net_device *dev)
/* Free any pending frames stuck in HW queue */
sky2_tx_complete(sky2, sky2->tx_prod);
+}
- sky2_rx_clean(sky2);
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+ /* Disable port IRQ */
+ sky2_write32(hw, B0_IMSK,
+ sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+
+ sky2_hw_down(sky2);
sky2_free_buffers(sky2);
@@ -1991,12 +2025,11 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, LNK_LED_REG),
LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- sky2->netdev->name, sky2->speed,
- sky2->duplex == DUPLEX_FULL ? "full" : "half",
- fc_name[sky2->flow_status]);
+ netif_info(sky2, link, sky2->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ sky2->speed,
+ sky2->duplex == DUPLEX_FULL ? "full" : "half",
+ fc_name[sky2->flow_status]);
}
static void sky2_link_down(struct sky2_port *sky2)
@@ -2016,8 +2049,7 @@ static void sky2_link_down(struct sky2_port *sky2)
/* Turn off link LED */
sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
+ netif_info(sky2, link, sky2->netdev, "Link is down\n");
sky2_phy_init(hw, port);
}
@@ -2039,13 +2071,12 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
if (lpa & PHY_M_AN_RF) {
- printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
+ netdev_err(sky2->netdev, "remote fault\n");
return -1;
}
if (!(aux & PHY_M_PS_SPDUP_RES)) {
- printk(KERN_ERR PFX "%s: speed/duplex mismatch",
- sky2->netdev->name);
+ netdev_err(sky2->netdev, "speed/duplex mismatch\n");
return -1;
}
@@ -2107,9 +2138,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
- sky2->netdev->name, istatus, phystat);
+ netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+ istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (sky2_autoneg_done(sky2, phystat) == 0)
@@ -2148,7 +2178,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* reset PHY Link Detect */
phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_link_up(sky2);
}
@@ -2161,13 +2193,12 @@ static void sky2_tx_timeout(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
- if (netif_msg_timer(sky2))
- printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
+ netif_err(sky2, timer, dev, "tx timeout\n");
- printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
- dev->name, sky2->tx_cons, sky2->tx_prod,
- sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
- sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
+ netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
/* can't restart safely under softirq */
schedule_work(&hw->restart_work);
@@ -2182,14 +2213,20 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
+ /* MTU size outside the spec */
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
+ /* MTU > 1500 on yukon FE and FE+ not allowed */
if (new_mtu > ETH_DATA_LEN &&
(hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
+ /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@@ -2224,7 +2261,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
- err = sky2_rx_start(sky2);
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2301,30 +2342,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info *re,
unsigned int length)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- /* Don't be tricky about reusing pages (yet) */
- nskb = sky2_rx_alloc(sky2);
- if (unlikely(!nskb))
- return NULL;
+ nre.skb = sky2_rx_alloc(sky2);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
skb = re->skb;
sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
prefetch(skb->data);
- re->skb = nskb;
- if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
- dev_kfree_skb(nskb);
- re->skb = skb;
- return NULL;
- }
+ *re = nre;
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
skb_put(skb, length);
return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
}
/*
@@ -2345,9 +2388,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
count -= VLAN_HLEN;
#endif
- if (unlikely(netif_msg_rx_status(sky2)))
- printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
- dev->name, sky2->rx_next, status, length);
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
@@ -2376,6 +2419,9 @@ okay:
skb = receive_copy(sky2, re, length);
else
skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
resubmit:
sky2_rx_submit(sky2, re);
@@ -2385,9 +2431,10 @@ len_error:
/* Truncation of overlength packets
causes PHY length to not match MAC length */
++dev->stats.rx_length_errors;
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- pr_info(PFX "%s: rx length error: status %#x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx length error: status %#x length %d\n",
+ status, length);
goto resubmit;
error:
@@ -2397,9 +2444,9 @@ error:
goto resubmit;
}
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx error, status 0x%x length %d\n", status, length);
if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
dev->stats.rx_length_errors++;
@@ -2416,8 +2463,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_running(dev))
+ if (netif_running(dev)) {
sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
}
static inline void sky2_skb_rx(const struct sky2_port *sky2,
@@ -2453,6 +2505,32 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
}
}
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+ /* If this happens then driver assuming wrong format for chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload */
+ sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2487,11 +2565,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXSTAT:
total_packets[port]++;
total_bytes[port] += length;
+
skb = sky2_receive(dev, length, status);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!skb)
break;
- }
/* This chip reports checksum status differently */
if (hw->flags & SKY2_HW_NEW_LE) {
@@ -2522,37 +2599,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
/* fall through */
#endif
case OP_RXCHKS:
- if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
- break;
-
- /* If this happens then driver assuming wrong format */
- if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
- if (net_ratelimit())
- printk(KERN_NOTICE "%s: unexpected"
- " checksum status\n",
- dev->name);
- break;
- }
-
- /* Both checksum counters are programmed to start at
- * the same offset, so unless there is a problem they
- * should match. This failure is an early indication that
- * hardware receive checksumming won't work.
- */
- if (likely(status >> 16 == (status & 0xffff))) {
- skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = le16_to_cpu(status);
- } else {
- printk(KERN_NOTICE PFX "%s: hardware receive "
- "checksum problem (status = %#x)\n",
- dev->name, status);
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
-
- sky2_write32(sky2->hw,
- Q_ADDR(rxqaddr[port], Q_CSR),
- BMU_DIS_RX_CHKSUM);
- }
+ if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
+ sky2_rx_checksum(sky2, status);
break;
case OP_TXINDEXLE:
@@ -2566,8 +2614,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
default:
if (net_ratelimit())
- printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", opcode);
+ pr_warning("unknown status opcode 0x%x\n", opcode);
}
} while (hw->st_idx != idx);
@@ -2586,41 +2633,37 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
struct net_device *dev = hw->dev[port];
if (net_ratelimit())
- printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
- dev->name, status);
+ netdev_info(dev, "hw error interrupt status 0x%x\n", status);
if (status & Y2_IS_PAR_RD1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: ram data read parity error\n",
- dev->name);
+ netdev_err(dev, "ram data read parity error\n");
/* Clear IRQ */
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
}
if (status & Y2_IS_PAR_WR1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: ram data write parity error\n",
- dev->name);
+ netdev_err(dev, "ram data write parity error\n");
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
}
if (status & Y2_IS_PAR_MAC1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+ netdev_err(dev, "MAC parity error\n");
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
}
if (status & Y2_IS_PAR_RX1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+ netdev_err(dev, "RX parity error\n");
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
}
if (status & Y2_IS_TCP_TXA1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: TCP segmentation error\n",
- dev->name);
+ netdev_err(dev, "TCP segmentation error\n");
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
}
}
@@ -2639,6 +2682,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2646,12 +2690,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@@ -2659,6 +2705,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
@@ -2674,9 +2721,7 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(dev);
u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_CO_OV)
gma_read16(hw, port, GM_RX_IRQ_SRC);
@@ -2701,8 +2746,7 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
struct net_device *dev = hw->dev[port];
u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
- dev_err(&hw->pdev->dev, PFX
- "%s: descriptor error q=%#x get=%u put=%u\n",
+ dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
dev->name, (unsigned) q, (unsigned) idx,
(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
@@ -2727,9 +2771,10 @@ static int sky2_rx_hung(struct net_device *dev)
/* Check if the PCI RX hang */
(fifo_rp == sky2->check.fifo_rp &&
fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
- printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
- dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
- sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+ netdev_printk(KERN_DEBUG, dev,
+ "hung mac %d:%d fifo %d (%d:%d)\n",
+ mac_lev, mac_rp, fifo_lev,
+ fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
return 1;
} else {
sky2->check.last = dev->last_rx;
@@ -2760,8 +2805,7 @@ static void sky2_watchdog(unsigned long arg)
/* For chips with Rx FIFO, check if stuck */
if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
sky2_rx_hung(dev)) {
- pr_info(PFX "%s: receiver hang detected\n",
- dev->name);
+ netdev_info(dev, "receiver hang detected\n");
schedule_work(&hw->restart_work);
return;
}
@@ -3001,11 +3045,20 @@ static void sky2_reset(struct sky2_hw *hw)
u32 hwe_mask = Y2_HWE_ALL_MASK;
/* disable ASF */
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
status = sky2_read16(hw, HCU_CCSR);
status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
} else
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
@@ -3037,6 +3090,7 @@ static void sky2_reset(struct sky2_hw *hw)
}
sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3073,6 +3127,7 @@ static void sky2_reset(struct sky2_hw *hw)
reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
/* reset PHY Link Detect */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4,
reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3086,10 +3141,11 @@ static void sky2_reset(struct sky2_hw *hw)
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
if (reg & PCI_EXP_LNKCTL_ASPMC) {
- int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -3176,7 +3232,9 @@ static void sky2_reset(struct sky2_hw *hw)
static void sky2_detach(struct net_device *dev)
{
if (netif_running(dev)) {
+ netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
sky2_down(dev);
}
}
@@ -3189,8 +3247,7 @@ static int sky2_reattach(struct net_device *dev)
if (netif_running(dev)) {
err = sky2_up(dev);
if (err) {
- printk(KERN_INFO PFX "%s: could not restart %d\n",
- dev->name, err);
+ netdev_info(dev, "could not restart %d\n", err);
dev_close(dev);
} else {
netif_device_attach(dev);
@@ -3204,20 +3261,46 @@ static int sky2_reattach(struct net_device *dev)
static void sky2_restart(struct work_struct *work)
{
struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+ u32 imask;
int i;
rtnl_lock();
- for (i = 0; i < hw->ports; i++)
- sky2_detach(hw->dev[i]);
napi_disable(&hw->napi);
+ synchronize_irq(hw->pdev->irq);
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
+
sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
- for (i = 0; i < hw->ports; i++)
- sky2_reattach(hw->dev[i]);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ netif_wake_queue(dev);
+ }
+
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
rtnl_unlock();
}
@@ -3245,17 +3328,6 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
sky2->wol = wol->wolopts;
-
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, sky2->wol
- ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
-
- device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
-
- if (!netif_running(dev))
- sky2_wol_init(sky2);
return 0;
}
@@ -3550,7 +3622,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3566,16 +3638,15 @@ static void sky2_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)
+ else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
sky2_add_filter(filter, list->dmi_addr);
}
@@ -3837,6 +3908,50 @@ static int sky2_get_regs_len(struct net_device *dev)
return 0x4000;
}
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+	/* This complicated switch statement is to make sure we
+	 * only access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+	case 25: 		/* Rx MAC Fifo 2 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/*
* Returns copy of control register region
* Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3851,55 +3966,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (b = 0; b < 128; b++) {
- /* This complicated switch statement is to make sure and
- * only access regions that are unreserved.
- * Some blocks are only valid on dual port cards.
- * and block 3 has some special diagnostic registers that
- * are poison.
- */
- switch (b) {
- case 3:
- /* skip diagnostic ram region */
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
- break;
-
- /* dual port cards only */
- case 5: /* Tx Arbiter 2 */
- case 9: /* RX2 */
- case 14 ... 15: /* TX2 */
- case 17: case 19: /* Ram Buffer 2 */
- case 22 ... 23: /* Tx Ram Buffer 2 */
- case 25: /* Rx MAC Fifo 1 */
- case 27: /* Tx MAC Fifo 2 */
- case 31: /* GPHY 2 */
- case 40 ... 47: /* Pattern Ram 2 */
- case 52: case 54: /* TCP Segmentation 2 */
- case 112 ... 116: /* GMAC 2 */
- if (sky2->hw->ports == 1)
- goto reserved;
- /* fall through */
- case 0: /* Control */
- case 2: /* Mac address */
- case 4: /* Tx Arbiter 1 */
- case 7: /* PCI express reg */
- case 8: /* RX1 */
- case 12 ... 13: /* TX1 */
- case 16: case 18:/* Rx Ram Buffer 1 */
- case 20 ... 21: /* Tx Ram Buffer 1 */
- case 24: /* Rx MAC Fifo 1 */
- case 26: /* Tx MAC Fifo 1 */
- case 28 ... 29: /* Descriptor and status unit */
- case 30: /* GPHY 1*/
- case 32 ... 39: /* Pattern Ram 1 */
- case 48: case 50: /* TCP Segmentation 1 */
- case 56 ... 60: /* PCI space */
- case 80 ... 84: /* GMAC 1 */
+ else if (sky2_reg_access_ok(sky2->hw, b))
memcpy_fromio(p, io, 128);
- break;
- default:
-reserved:
+ else
memset(p, 0, 128);
- }
p += 128;
io += 128;
@@ -3951,7 +4024,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
/* Can take up to 10.6 ms for write */
if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, PFX "VPD cycle timed out");
+ dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
return -ETIMEDOUT;
}
mdelay(1);
@@ -4285,8 +4358,7 @@ static int sky2_device_event(struct notifier_block *unused,
case NETDEV_GOING_DOWN:
if (sky2->debugfs) {
- printk(KERN_DEBUG PFX "%s: remove debugfs\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
debugfs_remove(sky2->debugfs);
sky2->debugfs = NULL;
}
@@ -4439,9 +4511,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
{
const struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_msg_probe(sky2))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
}
/* Handle software interrupt used during MSI test */
@@ -4684,6 +4754,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
+ pdev->d3_delay = 150;
return 0;
@@ -4746,7 +4817,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4771,6 +4841,8 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
wol |= sky2->wol;
}
+ device_set_wakeup_enable(&pdev->dev, wol != 0);
+
sky2_write32(hw, B0_IMSK, 0);
napi_disable(&hw->napi);
sky2_power_aux(hw);
@@ -4783,6 +4855,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+#ifdef CONFIG_PM
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4791,6 +4864,7 @@ static int sky2_resume(struct pci_dev *pdev)
if (!hw)
return 0;
+ rtnl_lock();
err = pci_set_power_state(pdev, PCI_D0);
if (err)
goto out;
@@ -4802,16 +4876,16 @@ static int sky2_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
/* Re-enable all clocks */
- if (hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
sky2_reset(hw);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
napi_enable(&hw->napi);
- rtnl_lock();
for (i = 0; i < hw->ports; i++) {
err = sky2_reattach(hw->dev[i]);
if (err)
@@ -4831,34 +4905,7 @@ out:
static void sky2_shutdown(struct pci_dev *pdev)
{
- struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
-
- if (!hw)
- return;
-
- rtnl_lock();
- del_timer_sync(&hw->watchdog_timer);
-
- for (i = 0; i < hw->ports; i++) {
- struct net_device *dev = hw->dev[i];
- struct sky2_port *sky2 = netdev_priv(dev);
-
- if (sky2->wol) {
- wol = 1;
- sky2_wol_init(sky2);
- }
- }
-
- if (wol)
- sky2_power_aux(hw);
- rtnl_unlock();
-
- pci_enable_wake(pdev, PCI_D3hot, wol);
- pci_enable_wake(pdev, PCI_D3cold, wol);
-
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ sky2_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver sky2_driver = {
@@ -4875,7 +4922,7 @@ static struct pci_driver sky2_driver = {
static int __init sky2_init_module(void)
{
- pr_info(PFX "driver version " DRV_VERSION "\n");
+ pr_info("driver version " DRV_VERSION "\n");
sky2_debug_init();
return pci_register_driver(&sky2_driver);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d834..a5e182dd9819 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1895,14 +1895,14 @@ enum {
/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
enum {
- TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
- TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
- TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
- TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+	TX_PCI_JUM_DIS	= 1<<22,/* PCI Jumbo Mode disable */
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
struct sk_buff *skb;
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
-#define TX_MAP_PAGE 000002
+#define TX_MAP_PAGE 0x0002
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index d640c0f5470b..140d63f3cafa 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -51,6 +51,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index ba5bbc503446..89696156c059 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -83,6 +83,7 @@
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include "slip.h"
#ifdef CONFIG_INET
#include <linux/ip.h>
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457b..635820d42b19 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -59,7 +59,6 @@ static const char version[] =
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
@@ -1323,7 +1322,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
@@ -1340,8 +1339,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
- int i;
+ else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr;
/* Set the Hash perfec mode */
@@ -1350,8 +1348,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
u32 position;
/* do we have a pointer here? */
@@ -2017,10 +2014,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
"set using ifconfig\n", dev->name);
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: ", dev->name);
- for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
- printk("%2.2x\n", dev->dev_addr[5]);
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
}
if (lp->phy_type == 0) {
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 05adb6a666cf..3269292efecc 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -42,12 +42,12 @@
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
-#elif defined(CONFIG_ARCH_OMAP34XX)
+#elif defined(CONFIG_ARCH_OMAP3)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#define SMC_MEM_RESERVED 1
-#elif defined(CONFIG_ARCH_OMAP24XX)
+#elif defined(CONFIG_ARCH_OMAP2)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 8371b82323ac..3f2f7843aa4e 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -64,7 +64,6 @@ static const char version[] =
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -434,18 +433,18 @@ static void smc_shutdown( int ioaddr )
*/
-static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs ) {
+static void smc_setmulticast(int ioaddr, struct net_device *dev)
+{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list * cur_addr;
+ struct dev_mc_list *cur_addr;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- cur_addr = addrs;
- for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
int position;
/* do we have a pointer here? */
@@ -1542,7 +1541,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* We just get all multicast packets even if we only want them
. from one source. This will be changed at some future
. point. */
- else if (dev->mc_count ) {
+ else if (!netdev_mc_empty(dev)) {
/* support hardware multicasting */
/* be sure I get rid of flags I might have set */
@@ -1550,7 +1549,7 @@ static void smc_set_multicast_list(struct net_device *dev)
ioaddr + RCR );
/* NOTE: this has to set the bank, so make sure it is the
last thing called. The bank is set to zero at the top */
- smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ smc_setmulticast(ioaddr, dev);
}
else {
outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ea4fae79d6ec..860339d51d58 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -70,7 +70,6 @@ static const char version[] =
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
@@ -1395,7 +1394,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(2, "%s: RCR_ALMUL\n", dev->name);
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1412,8 +1411,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* the number of the 8 bit register, while the low 3 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
- int i;
+ else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr;
/* table for flipping the order of 3 bits */
@@ -1422,13 +1420,9 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
int position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
if (!(*cur_addr->dmi_addr & 1))
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 54799544bda3..8d2772cc42f2 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -330,6 +330,48 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#include <unit/smc91111.h>
+#elif defined(CONFIG_ARCH_MSM)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
+
+#elif defined(CONFIG_COLDFIRE)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+static inline void mcf_insw(void *a, unsigned char *p, int l)
+{
+ u16 *wp = (u16 *) p;
+ while (l-- > 0)
+ *wp++ = readw(a);
+}
+
+static inline void mcf_outsw(void *a, unsigned char *p, int l)
+{
+ u16 *wp = (u16 *) p;
+ while (l-- > 0)
+ writew(*wp++, a);
+}
+
+#define SMC_inw(a, r) _swapw(readw((a) + (r)))
+#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
+#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
+
+#define SMC_IRQ_FLAGS (IRQF_DISABLED)
+
#else
/*
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 494cd91ea39c..cbf520d38eac 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -41,7 +41,6 @@
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/bitops.h>
@@ -770,29 +769,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
struct phy_device *phydev = NULL;
- int phy_addr;
+ int ret;
/* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (pdata->mii_bus->phy_map[phy_addr]) {
- phydev = pdata->mii_bus->phy_map[phy_addr];
- SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
- phy_addr, phydev->addr, phydev->phy_id);
- break;
- }
- }
-
+ phydev = phy_find_first(pdata->mii_bus);
if (!phydev) {
pr_err("%s: no PHY found\n", dev->name);
return -ENODEV;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface);
+	SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
+		   phydev->addr, phydev->phy_id);
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev,
+ &smsc911x_phy_adjust_link, 0,
+ pdata->config.phy_interface);
+
+ if (ret) {
pr_err("%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
@@ -1383,33 +1378,24 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
pdata->hashhi = 0;
pdata->hashlo = 0;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- unsigned int count = 0;
- struct dev_mc_list *mc_list = dev->mc_list;
+ struct dev_mc_list *mc_list;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if ((mc_list->dmi_addrlen) == ETH_ALEN) {
- unsigned int bitnum =
- smsc911x_hash(mc_list->dmi_addr);
- unsigned int mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_high |= mask;
- else
- hash_low |= mask;
- } else {
- SMSC_WARNING(DRV, "dmi_addrlen != 6");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, dev) {
+ unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ unsigned int mask = 0x01 << (bitnum & 0x1F);
+
+ if (bitnum & 0x20)
+ hash_high |= mask;
+ else
+ hash_low |= mask;
}
- if (count != (unsigned int)dev->mc_count)
- SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
pdata->hashlo = hash_low;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3c..aafaebf45748 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -26,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "smsc9420.h"
@@ -80,7 +81,7 @@ struct smsc9420_pdata {
int last_carrier;
};
-static const struct pci_device_id smsc9420_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
@@ -1062,12 +1063,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
- } else if (dev->mc_count > 0) {
- struct dev_mc_list *mc_list = dev->mc_list;
+ } else if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *mc_list;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- while (mc_list) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
u32 mask = 1 << (bit_num & 0x1F);
@@ -1076,7 +1077,6 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
else
hash_lo |= mask;
- mc_list = mc_list->next;
}
smsc9420_reg_write(pd, HASHH, hash_hi);
smsc9420_reg_write(pd, HASHL, hash_lo);
@@ -1348,7 +1348,7 @@ static int smsc9420_open(struct net_device *dev)
netif_carrier_off(dev);
- /* disable, mask and acknowlege all interrupts */
+ /* disable, mask and acknowledge all interrupts */
spin_lock_irqsave(&pd->int_lock, flags);
int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
smsc9420_reg_write(pd, INT_CFG, int_cfg);
diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c
index 854ccf2b4105..6b2a88817473 100644
--- a/drivers/net/sni_82596.c
+++ b/drivers/net/sni_82596.c
@@ -8,7 +8,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 9599ce77ef85..287c251075e5 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned char *addr;
int i;
@@ -541,19 +541,22 @@ static void sonic_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
rcr |= SONIC_RCR_PRO;
} else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
+ i = 1;
+ netdev_for_each_mc_addr(dmi, dev) {
addr = dmi->dmi_addr;
- dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
+ i++;
}
SONIC_WRITE(SONIC_CDC, 16);
/* issue Load CAM command */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bfc..dd3cb0f2d21f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/init.h>
+#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/kernel.h>
@@ -40,7 +41,6 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
char spider_net_driver_name[] = "spidernet";
-static struct pci_device_id spider_net_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
@@ -474,7 +474,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
* spider_net_enable_rxchtails - sets RX dmac chain tail addresses
* @card: card structure
*
- * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
* chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac.
*/
@@ -646,7 +646,7 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- for (mc = netdev->mc_list; mc; mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
set_bit(hash, bitmask);
}
@@ -1820,7 +1820,7 @@ spider_net_enable_card(struct spider_net_card *card)
spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
- /* set chain tail adress for RX chains and
+ /* set chain tail address for RX chains and
* enable DMA */
spider_net_enable_rxchtails(card);
spider_net_enable_rxdmac(card);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60adde41..6dfa69899019 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
CH_6915 = 0,
};
-static struct pci_device_id starfire_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
{ 0, }
};
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
if (retval) {
printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
FIRMWARE_RX);
- return retval;
+ goto out_init;
}
if (fw_rx->size % 4) {
printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
@@ -1108,6 +1108,9 @@ out_tx:
release_firmware(fw_tx);
out_rx:
release_firmware(fw_rx);
+out_init:
+ if (retval)
+ netdev_close(dev);
return retval;
}
@@ -1793,22 +1796,22 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode |= AcceptAll;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
- } else if (dev->mc_count <= 14) {
+ } else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
eaddrs = (__be16 *)dev->dev_addr;
+ i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
@@ -1822,8 +1825,7 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7f..eb63d44748a7 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -2,10 +2,12 @@ config STMMAC_ETH
tristate "STMicroelectronics 10/100/1000 Ethernet driver"
select MII
select PHYLIB
+ select CRC32
depends on NETDEVICES && CPU_SUBTYPE_ST40
help
- This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
- controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+	  This is the driver for the Ethernet IPs built around a
+	  Synopsys IP Core and fully tested on the STMicroelectronics
+	  platforms.
if STMMAC_ETH
@@ -32,7 +34,8 @@ config STMMAC_TIMER
default n
help
Use an external timer for mitigating the number of network
- interrupts.
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
choice
prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564dfa..c776af15fe1a 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- mac100.o gmac.o $(stmmac-y)
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e887..2a58172e986a 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
*******************************************************************************/
#include "descs.h"
-#include <linux/io.h>
-
-/* *********************************************
- DMA CRS Control and Status Register Mapping
- * *********************************************/
-#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
-#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
-#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
-#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
-#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
-#define DMA_STATUS 0x00001014 /* Status Register */
-#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
-#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
-#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
-#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
-
-/* ********************************
- DMA Control register defines
- * ********************************/
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* **************************************
- DMA Interrupt Enable register defines
- * **************************************/
-/**** NORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/**** ABNORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-
-/* ****************************
- * DMA Status register defines
- * ****************************/
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-
-/* Other defines */
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-/* DMA STORE-AND-FORWARD Operation Mode */
-#define SF_DMA_MODE 1
-
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
+#include <linux/netdevice.h>
struct stmmac_extra_stats {
/* Transmit errors */
@@ -169,7 +44,7 @@ struct stmmac_extra_stats {
unsigned long rx_toolong;
unsigned long rx_collision;
unsigned long rx_crc;
- unsigned long rx_lenght;
+ unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
unsigned long rx_gmac_overflow;
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
unsigned long normal_irq_n;
};
-/* GMAC core can compute the checksums in HW. */
-enum rx_frame_status {
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
csum_none = 2,
};
-static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
- unsigned int high, unsigned int low)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- writel(data, ioaddr + high);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + low);
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
- return;
-}
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
-static inline void stmmac_get_mac_addr(unsigned long ioaddr,
- unsigned char *addr, unsigned int high,
- unsigned int low)
-{
- unsigned int hi_addr, lo_addr;
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
- /* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + high);
- lo_addr = readl(ioaddr + low);
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
- /* Extract the MAC address from the high and low words */
- addr[0] = lo_addr & 0xff;
- addr[1] = (lo_addr >> 8) & 0xff;
- addr[2] = (lo_addr >> 16) & 0xff;
- addr[3] = (lo_addr >> 24) & 0xff;
- addr[4] = hi_addr & 0xff;
- addr[5] = (hi_addr >> 8) & 0xff;
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
- return;
-}
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
- /* DMA core initialization */
- int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump MAC registers */
- void (*dump_mac_regs) (unsigned long ioaddr);
- /* Dump DMA registers */
- void (*dump_dma_regs) (unsigned long ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- /* RX descriptor ring initialization */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* TX descriptor ring initialization */
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
/* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ void (*enable_dma_transmission) (unsigned long ioaddr);
+ void (*enable_dma_irq) (unsigned long ioaddr);
+ void (*disable_dma_irq) (unsigned long ioaddr);
+ void (*start_tx) (unsigned long ioaddr);
+ void (*stop_tx) (unsigned long ioaddr);
+ void (*start_rx) (unsigned long ioaddr);
+ void (*stop_rx) (unsigned long ioaddr);
+ int (*dma_interrupt) (unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* Dump MAC registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
void (*pmt) (unsigned long ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
};
struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct hw_cap {
- unsigned int version; /* Core Version register (GMAC) */
- unsigned int pmt; /* Power-Down mode (GMAC) */
+struct mac_device_info {
+ struct stmmac_ops *mac;
+ struct stmmac_desc_ops *desc;
+ struct stmmac_dma_ops *dma;
+ unsigned int pmt; /* support Power-Down */
+ struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- struct mii_regs mii;
};
-struct mac_device_info {
- struct hw_cap hw;
- struct stmmac_ops *ops;
-};
+struct mac_device_info *dwmac1000_setup(unsigned long addr);
+struct mac_device_info *dwmac100_setup(unsigned long addr);
-struct mac_device_info *gmac_setup(unsigned long addr);
-struct mac_device_info *mac100_setup(unsigned long addr);
+extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e57..63a03e264694 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors
- Use enhanced descriptors in case of GMAC Cores.
+ Header File to describe the DMA descriptors.
+ Enhanced descriptors are used in case of DWMAC1000 Cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062b..4cacca614fc1 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,24 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/slab.h>
#include "common.h"
-#include "mac100.h"
+#include "dwmac100.h"
+#include "dwmac_dma.h"
-#undef MAC100_DEBUG
-/*#define MAC100_DEBUG*/
-#ifdef MAC100_DEBUG
+#undef DWMAC100_DEBUG
+/*#define DWMAC100_DEBUG*/
+#ifdef DWMAC100_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif
-static void mac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(unsigned long ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -54,43 +55,43 @@ static void mac100_core_init(unsigned long ioaddr)
return;
}
-static void mac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t MAC100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
+ readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
+ readl(ioaddr + MAC_ADDR_HIGH));
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
+ readl(ioaddr + MAC_ADDR_LOW));
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
+ readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
+ readl(ioaddr + MAC_VLAN2));
pr_info("\n\tMAC management counter registers\n");
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
return;
}
-static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +118,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +135,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
return;
}
-static void mac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
int i;
- DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
@@ -151,8 +152,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
}
/* DMA controller has two counters to track the number of
- the receive missed frames. */
-static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x,
unsigned long ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +183,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
return;
}
-static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
@@ -217,7 +220,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int mac100_get_tx_len(struct dma_desc *p)
+static int dwmac100_get_tx_len(struct dma_desc *p)
{
return p->des01.tx.buffer1_size;
}
@@ -226,14 +229,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns csum_none because the device
* is not able to compute the csum in HW. */
-static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = csum_none;
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("mac100 Error: Oversized Ethernet "
+ pr_warning("dwmac100 Error: Oversized Ethernet "
"frame spanned multiple buffers\n");
stats->rx_length_errors++;
return discard_frame;
@@ -262,7 +266,7 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
ret = discard_frame;
if (unlikely(p->des01.rx.length_error)) {
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
if (unlikely(p->des01.rx.mii_error)) {
@@ -276,24 +280,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void mac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(unsigned long ioaddr)
{
return;
}
-static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -302,29 +306,27 @@ static void mac100_set_filter(struct net_device *dev)
value |= MAC_CONTROL_PR;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
MAC_CONTROL_HP);
- } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value |= MAC_CONTROL_PM;
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (dev->mc_count == 0) { /* no multicast */
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
- int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
/* Perfect filter mode for physical address and Hash
filter for multicast */
value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
- | MAC_CONTROL_HO);
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The upper 6 bits of the calculated CRC are used to
* index the contents of the hash table */
int bit_nr =
@@ -347,7 +349,7 @@ static void mac100_set_filter(struct net_device *dev)
return;
}
-static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +361,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
return;
}
-/* No PMT module supported in our SoC for the Ethernet Controller. */
-static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
return;
}
-static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -381,7 +385,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
for (i = 0; i < ring_size; i++) {
@@ -393,32 +397,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int mac100_get_tx_owner(struct dma_desc *p)
+static int dwmac100_get_tx_owner(struct dma_desc *p)
{
return p->des01.tx.own;
}
-static int mac100_get_rx_owner(struct dma_desc *p)
+static int dwmac100_get_rx_owner(struct dma_desc *p)
{
return p->des01.rx.own;
}
-static void mac100_set_tx_owner(struct dma_desc *p)
+static void dwmac100_set_tx_owner(struct dma_desc *p)
{
p->des01.tx.own = 1;
}
-static void mac100_set_rx_owner(struct dma_desc *p)
+static void dwmac100_set_rx_owner(struct dma_desc *p)
{
p->des01.rx.own = 1;
}
-static int mac100_get_tx_ls(struct dma_desc *p)
+static int dwmac100_get_tx_ls(struct dma_desc *p)
{
return p->des01.tx.last_segment;
}
-static void mac100_release_tx_desc(struct dma_desc *p)
+static void dwmac100_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
@@ -444,74 +448,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
return;
}
-static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
p->des01.tx.buffer1_size = len;
}
-static void mac100_clear_tx_ic(struct dma_desc *p)
+static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
p->des01.tx.interrupt = 0;
}
-static void mac100_close_tx_desc(struct dma_desc *p)
+static void dwmac100_close_tx_desc(struct dma_desc *p)
{
p->des01.tx.last_segment = 1;
p->des01.tx.interrupt = 1;
}
-static int mac100_get_rx_frame_len(struct dma_desc *p)
+static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.rx.frame_length;
}
-struct stmmac_ops mac100_driver = {
- .core_init = mac100_core_init,
- .dump_mac_regs = mac100_dump_mac_regs,
- .dma_init = mac100_dma_init,
- .dump_dma_regs = mac100_dump_dma_regs,
- .dma_mode = mac100_dma_operation_mode,
- .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
- .tx_status = mac100_get_tx_frame_status,
- .rx_status = mac100_get_rx_frame_status,
- .get_tx_len = mac100_get_tx_len,
- .set_filter = mac100_set_filter,
- .flow_ctrl = mac100_flow_ctrl,
- .pmt = mac100_pmt,
- .init_rx_desc = mac100_init_rx_desc,
- .init_tx_desc = mac100_init_tx_desc,
- .get_tx_owner = mac100_get_tx_owner,
- .get_rx_owner = mac100_get_rx_owner,
- .release_tx_desc = mac100_release_tx_desc,
- .prepare_tx_desc = mac100_prepare_tx_desc,
- .clear_tx_ic = mac100_clear_tx_ic,
- .close_tx_desc = mac100_close_tx_desc,
- .get_tx_ls = mac100_get_tx_ls,
- .set_tx_owner = mac100_set_tx_owner,
- .set_rx_owner = mac100_set_rx_owner,
- .get_rx_frame_len = mac100_get_rx_frame_len,
- .host_irq_status = mac100_irq_status,
- .set_umac_addr = mac100_set_umac_addr,
- .get_umac_addr = mac100_get_umac_addr,
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *mac100_setup(unsigned long ioaddr)
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
+
+struct stmmac_desc_ops dwmac100_desc_ops = {
+ .tx_status = dwmac100_get_tx_frame_status,
+ .rx_status = dwmac100_get_rx_frame_status,
+ .get_tx_len = dwmac100_get_tx_len,
+ .init_rx_desc = dwmac100_init_rx_desc,
+ .init_tx_desc = dwmac100_init_tx_desc,
+ .get_tx_owner = dwmac100_get_tx_owner,
+ .get_rx_owner = dwmac100_get_rx_owner,
+ .release_tx_desc = dwmac100_release_tx_desc,
+ .prepare_tx_desc = dwmac100_prepare_tx_desc,
+ .clear_tx_ic = dwmac100_clear_tx_ic,
+ .close_tx_desc = dwmac100_close_tx_desc,
+ .get_tx_ls = dwmac100_get_tx_ls,
+ .set_tx_owner = dwmac100_set_tx_owner,
+ .set_rx_owner = dwmac100_set_rx_owner,
+ .get_rx_frame_len = dwmac100_get_rx_frame_len,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
{
struct mac_device_info *mac;
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- pr_info("\tMAC 10/100\n");
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->desc = &dwmac100_desc_ops;
+ mac->dma = &dwmac100_dma_ops;
- mac->ops = &mac100_driver;
- mac->hw.pmt = PMT_NOT_SUPPORTED;
- mac->hw.link.port = MAC_CONTROL_PS;
- mac->hw.link.duplex = MAC_CONTROL_F;
- mac->hw.link.speed = 0;
- mac->hw.mii.addr = MAC_MII_ADDR;
- mac->hw.mii.data = MAC_MII_DATA;
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
return mac;
}
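
[Editor's sketch, not part of the patch] The hash-filter indexing that dwmac100_set_filter() relies on (the upper six bits of the bit-reversed CRC-32 select one of 64 filter bits; the top bit of those six picks the HIGH/LOW register) can be written out on its own. The helper below is illustrative only.

#include <linux/bitrev.h>
#include <linux/crc32.h>

/* Sketch only: which hash register/bit a multicast address lands in. */
static void dwmac_hash_bit_sketch(const unsigned char *addr,
				  int *use_high_reg, int *bit_in_reg)
{
	int bit_nr = bitrev32(~crc32_le(~0, addr, 6)) >> 26;

	*use_high_reg = bit_nr >> 5;	/* 1 -> MAC_HASH_HIGH, 0 -> MAC_HASH_LOW */
	*bit_in_reg = bit_nr & 31;	/* bit position inside that register */
}
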
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..0f8f110d004a 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a148..62dca0e384e7 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
#define GMAC_CONTROL 0x00000000 /* Configuration */
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum gmac_irq_status {
+enum dwmac1000_irq_status {
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+#undef DWMAC1000_DEBUG
+/* #define DWMAC1000__DEBUG */
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+#ifdef DWMAC1000__DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..5bd95ebfe498
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,244 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void dwmac1000_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(mclist, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
+
+static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+
+static void dwmac1000_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ mac->mac = &dwmac1000_ops;
+ mac->desc = &dwmac1000_desc_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->pmt = PMT_SUPPORTED;
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
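
[Editor's sketch, not part of the patch] For context, a hedged sketch of how a probe path might pick between the two setup helpers exported by this series; the real selection lives in stmmac_main.c (outside this hunk) and the helper name here is hypothetical.

/* Sketch only: choose the HW-specific tables based on the core type. */
static int stmmac_attach_mac_sketch(struct stmmac_priv *priv,
				    unsigned long ioaddr, int is_gmac)
{
	struct mac_device_info *device;

	device = is_gmac ? dwmac1000_setup(ioaddr) : dwmac100_setup(ioaddr);
	if (!device)
		return -ENOMEM;

	priv->hw = device;
	return 0;
}
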
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee68953..39d436a2da68 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
+ This contains the functions to handle the dma and descriptors.
+
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-
-#include "stmmac.h"
-#include "gmac.h"
-
-#undef GMAC_DEBUG
-/*#define GMAC_DEBUG*/
-#undef FRAME_FILTER_DEBUG
-/*#define FRAME_FILTER_DEBUG*/
-#ifdef GMAC_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
-static void gmac_dump_regs(unsigned long ioaddr)
-{
- int i;
- pr_info("\t----------------------------------------------\n"
- "\t GMAC registers (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
-
- for (i = 0; i < 55; i++) {
- int offset = i * 4;
- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
- offset, readl(ioaddr + offset));
- }
- return;
-}
-
-static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
}
/* Transmit FIFO flush operation */
-static void gmac_flush_tx_fifo(unsigned long ioaddr)
+static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
}
/* Not yet implemented --- no RMON module */
-static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+static void dwmac1000_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x, unsigned long ioaddr)
{
return;
}
-static void gmac_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
{
int i;
pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+static int dwmac1000_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.frame_flushed)) {
DBG(KERN_ERR "\tframe_flushed error\n");
x->tx_frame_flushed++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.underflow_error)) {
DBG(KERN_ERR "\tunderflow error\n");
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.payload_error)) {
DBG(KERN_ERR "\tAddr/Payload csum error\n");
x->tx_payload_error++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
ret = -1;
@@ -245,19 +218,19 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int gmac_get_tx_len(struct dma_desc *p)
+static int dwmac1000_get_tx_len(struct dma_desc *p)
{
return p->des01.etx.buffer1_size;
}
-static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
{
int ret = good_frame;
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p)
+static int dwmac1000_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x, struct dma_desc *p)
{
int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
if (unlikely(p->des01.erx.dribbling)) {
@@ -358,7 +331,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
}
if (unlikely(p->des01.erx.length_error)) {
DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void gmac_irq_status(unsigned long ioaddr)
-{
- u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
-
- /* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
- readl(ioaddr + GMAC_PMT);
- }
-
- return;
-}
-
-static void gmac_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CORE_INIT;
- writel(value, ioaddr + GMAC_CONTROL);
-
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
- /* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
-
-#ifdef STMMAC_VLAN_TAG_USED
- /* Tag detection without filtering */
- writel(0x0, ioaddr + GMAC_VLAN_TAG);
-#endif
- return;
-}
-
-static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned int value = 0;
-
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, dev->uc_count);
-
- if (dev->flags & IFF_PROMISC)
- value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value = GMAC_FRAME_FILTER_PM; /* pass all multi */
- writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
- int i;
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Hash filter for multicast */
- value = GMAC_FRAME_FILTER_HMC;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- /* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
- }
-
- /* Handle multiple unicast addresses (perfect filtering)*/
- if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
- value |= GMAC_FRAME_FILTER_PR;
- else {
- int i;
- struct dev_addr_list *uc_ptr = dev->uc_list;
-
- for (i = 0; i < dev->uc_count; i++) {
- gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
- i + 1);
-
- DBG(KERN_INFO "\t%d "
- "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
- "%02x\n", i + 1,
- uc_ptr->da_addr[0], uc_ptr->da_addr[1],
- uc_ptr->da_addr[2], uc_ptr->da_addr[3],
- uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
- uc_ptr = uc_ptr->next;
- }
- }
-
-#ifdef FRAME_FILTER_DEBUG
- /* Enable Receive all mode (to debug filtering_fail errors) */
- value |= GMAC_FRAME_FILTER_RA;
-#endif
- writel(value, ioaddr + GMAC_FRAME_FILTER);
-
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
-}
-
-static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = 0;
-
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
- if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_RFE;
- }
- if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_TFE;
- }
-
- if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
- }
-
- writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
-}
-
-static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
-{
- unsigned int pmt = 0;
-
- if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
- pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
- }
-
- writel(pmt, ioaddr + GMAC_PMT);
- return;
-}
-
-static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int gmac_get_tx_owner(struct dma_desc *p)
+static int dwmac1000_get_tx_owner(struct dma_desc *p)
{
return p->des01.etx.own;
}
-static int gmac_get_rx_owner(struct dma_desc *p)
+static int dwmac1000_get_rx_owner(struct dma_desc *p)
{
return p->des01.erx.own;
}
-static void gmac_set_tx_owner(struct dma_desc *p)
+static void dwmac1000_set_tx_owner(struct dma_desc *p)
{
p->des01.etx.own = 1;
}
-static void gmac_set_rx_owner(struct dma_desc *p)
+static void dwmac1000_set_rx_owner(struct dma_desc *p)
{
p->des01.erx.own = 1;
}
-static int gmac_get_tx_ls(struct dma_desc *p)
+static int dwmac1000_get_tx_ls(struct dma_desc *p)
{
return p->des01.etx.last_segment;
}
-static void gmac_release_tx_desc(struct dma_desc *p)
+static void dwmac1000_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
return;
}
-static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des01.etx.checksum_insertion = cic_full;
}
-static void gmac_clear_tx_ic(struct dma_desc *p)
+static void dwmac1000_clear_tx_ic(struct dma_desc *p)
{
p->des01.etx.interrupt = 0;
}
-static void gmac_close_tx_desc(struct dma_desc *p)
+static void dwmac1000_close_tx_desc(struct dma_desc *p)
{
p->des01.etx.last_segment = 1;
p->des01.etx.interrupt = 1;
}
-static int gmac_get_rx_frame_len(struct dma_desc *p)
+static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.erx.frame_length;
}
-struct stmmac_ops gmac_driver = {
- .core_init = gmac_core_init,
- .dump_mac_regs = gmac_dump_regs,
- .dma_init = gmac_dma_init,
- .dump_dma_regs = gmac_dump_dma_regs,
- .dma_mode = gmac_dma_operation_mode,
- .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
- .tx_status = gmac_get_tx_frame_status,
- .rx_status = gmac_get_rx_frame_status,
- .get_tx_len = gmac_get_tx_len,
- .set_filter = gmac_set_filter,
- .flow_ctrl = gmac_flow_ctrl,
- .pmt = gmac_pmt,
- .init_rx_desc = gmac_init_rx_desc,
- .init_tx_desc = gmac_init_tx_desc,
- .get_tx_owner = gmac_get_tx_owner,
- .get_rx_owner = gmac_get_rx_owner,
- .release_tx_desc = gmac_release_tx_desc,
- .prepare_tx_desc = gmac_prepare_tx_desc,
- .clear_tx_ic = gmac_clear_tx_ic,
- .close_tx_desc = gmac_close_tx_desc,
- .get_tx_ls = gmac_get_tx_ls,
- .set_tx_owner = gmac_set_tx_owner,
- .set_rx_owner = gmac_set_rx_owner,
- .get_rx_frame_len = gmac_get_rx_frame_len,
- .host_irq_status = gmac_irq_status,
- .set_umac_addr = gmac_set_umac_addr,
- .get_umac_addr = gmac_get_umac_addr,
+struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
};
-struct mac_device_info *gmac_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- mac->ops = &gmac_driver;
- mac->hw.pmt = PMT_SUPPORTED;
- mac->hw.link.port = GMAC_CONTROL_PS;
- mac->hw.link.duplex = GMAC_CONTROL_DM;
- mac->hw.link.speed = GMAC_CONTROL_FES;
- mac->hw.mii.addr = GMAC_MII_ADDR;
- mac->hw.mii.data = GMAC_MII_DATA;
-
- return mac;
-}
+struct stmmac_desc_ops dwmac1000_desc_ops = {
+ .tx_status = dwmac1000_get_tx_frame_status,
+ .rx_status = dwmac1000_get_rx_frame_status,
+ .get_tx_len = dwmac1000_get_tx_len,
+ .init_rx_desc = dwmac1000_init_rx_desc,
+ .init_tx_desc = dwmac1000_init_tx_desc,
+ .get_tx_owner = dwmac1000_get_tx_owner,
+ .get_rx_owner = dwmac1000_get_rx_owner,
+ .release_tx_desc = dwmac1000_release_tx_desc,
+ .prepare_tx_desc = dwmac1000_prepare_tx_desc,
+ .clear_tx_ic = dwmac1000_clear_tx_ic,
+ .close_tx_desc = dwmac1000_close_tx_desc,
+ .get_tx_ls = dwmac1000_get_tx_ls,
+ .set_tx_owner = dwmac1000_set_tx_owner,
+ .set_rx_owner = dwmac1000_set_rx_owner,
+ .get_rx_frame_len = dwmac1000_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..de848d9f6060
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
+extern void dwmac_enable_dma_irq(unsigned long ioaddr);
+extern void dwmac_disable_dma_irq(unsigned long ioaddr);
+extern void dwmac_dma_start_tx(unsigned long ioaddr);
+extern void dwmac_dma_stop_tx(unsigned long ioaddr);
+extern void dwmac_dma_start_rx(unsigned long ioaddr);
+extern void dwmac_dma_stop_rx(unsigned long ioaddr);
+extern int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
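
[Editor's sketch, not part of the patch] A small sanity check spelling out what the composed interrupt masks above evaluate to; the helper is hypothetical and only documents the arithmetic.

#include <linux/kernel.h>	/* BUILD_BUG_ON */

/* Sketch only: the mask written by dwmac_enable_dma_irq() expands to:
 *   DMA_INTR_NORMAL   = NIE | RIE | TIE = 0x00010041
 *   DMA_INTR_ABNORMAL = AIE | FBE | UNE = 0x0000a020
 *   DMA_INTR_DEFAULT_MASK               = 0x0001a061
 */
static inline void dwmac_intr_mask_sketch(void)
{
	BUILD_BUG_ON(DMA_INTR_NORMAL != 0x00010041);
	BUILD_BUG_ON(DMA_INTR_ABNORMAL != 0x0000a020);
	BUILD_BUG_ON(DMA_INTR_DEFAULT_MASK != 0x0001a061);
}
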
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..d4adb1eaa447
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(unsigned long ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(unsigned long ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(unsigned long ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+void dwmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(INFO, "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(INFO, "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(INFO, "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(INFO, "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(INFO, "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(INFO, "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(INFO, "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(INFO, "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(INFO, "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(INFO, "\n\n");
+ return ret;
+}
+
+
+void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
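
[Editor's sketch, not part of the patch] To make the register packing above concrete, a usage sketch: the register offsets are the dwmac100 ones referenced earlier in this series, and the example function is hypothetical.

/* Sketch only: round trip for 00:11:22:33:44:55 through the helpers.
 * stmmac_set_mac_addr() writes high = 0x00005544, low = 0x33221100;
 * stmmac_get_mac_addr() reassembles the same six bytes. */
static void stmmac_mac_addr_sketch(unsigned long ioaddr)
{
	u8 addr_in[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 addr_out[6];

	stmmac_set_mac_addr(ioaddr, addr_in, MAC_ADDR_HIGH, MAC_ADDR_LOW);
	stmmac_get_mac_addr(ioaddr, addr_out, MAC_ADDR_HIGH, MAC_ADDR_LOW);
	/* addr_out now equals addr_in */
}
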
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e5..ba35e6943cf4 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_09"
+#define DRV_MODULE_VERSION "Jan_2010"
+#include <linux/stmmac.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
- struct mac_device_info *mac_type;
+ struct mac_device_info *hw;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
void *bsp_priv;
int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
#endif
};
+#ifdef CONFIG_STM_DRIVERS
+#include <linux/stm/pad.h>
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
+
+ /* Pad routing setup */
+ if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
+ dev_name(&pdev->dev)))) {
+ printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+#else
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
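
[Editor's sketch, not part of the patch] One line of context on the new bus_setup hook added to stmmac_priv above: a plausible consumer (the actual call site is in stmmac_main.c, not visible in this hunk) simply invokes it, if set, before touching the MAC.

/* Sketch only: how the optional bus_setup callback would be honoured. */
static void stmmac_bus_setup_sketch(struct stmmac_priv *priv,
				    unsigned long ioaddr)
{
	if (priv->bus_setup)
		priv->bus_setup(ioaddr);
}
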
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a0758..c021eaa3ca69 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
#define REG_SPACE_SIZE 0x1054
#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -61,7 +62,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_toolong),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc),
- STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
STMMAC_STAT(rx_gmac_overflow),
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
} else {
unsigned long ioaddr = netdev->base_addr;
- priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
+ priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i;
/* Update HW stats if supported */
- priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
- ioaddr);
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+ ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07f..4111a85ec80e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
@@ -45,7 +44,7 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
-#include <linux/stm/soc.h>
+#include <linux/slab.h>
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +225,38 @@ static void stmmac_adjust_link(struct net_device *dev)
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
- ctrl &= ~priv->mac_type->hw.link.duplex;
+ ctrl &= ~priv->hw->link.duplex;
else
- ctrl |= priv->mac_type->hw.link.duplex;
+ ctrl |= priv->hw->link.duplex;
priv->oldduplex = phydev->duplex;
}
/* Flow Control operation */
if (phydev->pause)
- priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
- fc, pause_time);
+ priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case 1000:
if (likely(priv->is_gmac))
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
break;
case 100:
case 10:
if (priv->is_gmac) {
- ctrl |= priv->mac_type->hw.link.port;
+ ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
- ctrl |=
- priv->mac_type->hw.link.
- speed;
+ ctrl |= priv->hw->link.speed;
} else {
- ctrl &=
- ~(priv->mac_type->hw.
- link.speed);
+ ctrl &= ~(priv->hw->link.speed);
}
} else {
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
}
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
default:
if (netif_msg_link(priv))
@@ -305,8 +301,8 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE]; /* PHY to connect */
- char bus_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
priv->oldlink = 0;
priv->speed = 0;
@@ -318,7 +314,8 @@ static int stmmac_init_phy(struct net_device *dev)
}
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +505,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->cur_tx = 0;
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
if (netif_msg_hw(priv)) {
pr_info("RX descriptor ring:\n");
@@ -544,8 +541,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + i;
if (p->des2)
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
- DMA_TO_DEVICE);
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
@@ -575,50 +572,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_start_tx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA tx process.
- */
-static void stmmac_dma_start_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-static void stmmac_dma_stop_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-/**
- * stmmac_dma_start_rx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA rx process.
- */
-static void stmmac_dma_start_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void stmmac_dma_stop_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +582,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (!priv->is_gmac) {
/* MAC 10/100 */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
priv->tx_coe = NO_HW_CSUM;
} else {
if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->mac_type->ops->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
priv->tx_coe = HW_CSUM;
} else {
/* Checksum computation is performed in software. */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
priv->tx_coe = NO_HW_CSUM;
}
}
@@ -649,88 +602,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
return;
}
-#ifdef STMMAC_DEBUG
-/**
- * show_tx_process_state
- * @status: tx descriptor status field
- * Description: it shows the Transmit Process State for CSR5[22:20]
- */
-static void show_tx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- TX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- TX (Running):Fetching the Tx desc\n");
- break;
- case 2:
- pr_info("- TX (Running): Waiting for end of tx\n");
- break;
- case 3:
- pr_info("- TX (Running): Reading the data "
- "and queuing the data into the Tx buf\n");
- break;
- case 6:
- pr_info("- TX (Suspended): Tx Buff Underflow "
- "or an unavailable Transmit descriptor\n");
- break;
- case 7:
- pr_info("- TX (Running): Closing Tx descriptor\n");
- break;
- default:
- break;
- }
- return;
-}
-
-/**
- * show_rx_process_state
- * @status: rx descriptor status field
- * Description: it shows the Receive Process State for CSR5[19:17]
- */
-static void show_rx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- RX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- RX (Running): Fetching the Rx desc\n");
- break;
- case 2:
- pr_info("- RX (Running):Checking for end of pkt\n");
- break;
- case 3:
- pr_info("- RX (Running): Waiting for Rx pkt\n");
- break;
- case 4:
- pr_info("- RX (Suspended): Unavailable Rx buf\n");
- break;
- case 5:
- pr_info("- RX (Running): Closing Rx descriptor\n");
- break;
- case 6:
- pr_info("- RX(Running): Flushing the current frame"
- " from the Rx buf\n");
- break;
- case 7:
- pr_info("- RX (Running): Queuing the Rx frame"
- " from the Rx buf into memory\n");
- break;
- default:
- break;
- }
- return;
-}
-#endif
-
/**
* stmmac_tx:
* @priv: private driver structure
@@ -748,16 +619,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
- if (priv->mac_type->ops->get_tx_owner(p))
+ if (priv->hw->desc->get_tx_owner(p))
break;
/* Verify tx error by looking at the last segment */
- last = priv->mac_type->ops->get_tx_ls(p);
+ last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->mac_type->ops->tx_status(&priv->dev->stats,
- &priv->xstats,
- p, ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -769,7 +640,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
if (likely(p->des2))
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
+ priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
if (unlikely(p->des3))
p->des3 = 0;
@@ -790,7 +661,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->tx_skbuff[entry] = NULL;
}
- priv->mac_type->ops->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p);
entry = (++priv->dirty_tx) % txsize;
}
@@ -814,7 +685,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +695,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +703,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
unsigned int has_work = 0;
int rxret, tx_work = 0;
- rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
(priv->cur_rx % priv->dma_rx_size));
if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +754,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
{
netif_stop_queue(priv->dev);
- stmmac_dma_stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->dev->base_addr);
dma_free_tx_skbufs(priv);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- stmmac_dma_start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->dev->base_addr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -896,95 +767,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
return;
}
-/**
- * stmmac_dma_interrupt - Interrupt handler for the driver
- * @dev: net device structure
- * Description: Interrupt handler for the driver (DMA).
- */
-static void stmmac_dma_interrupt(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- struct stmmac_priv *priv = netdev_priv(dev);
- /* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
-#ifdef STMMAC_DEBUG
- /* It displays the DMA transmit process state (CSR5 register) */
- if (netif_msg_tx_done(priv))
- show_tx_process_state(intr_status);
- if (netif_msg_rx_status(priv))
- show_rx_process_state(intr_status);
-#endif
- /* ABNORMAL interrupts */
- if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
- if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(intr, INFO, "transmit underflow\n");
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
- /* Try to bump up the threshold */
- tc += 64;
- priv->mac_type->ops->dma_mode(ioaddr, tc,
- SF_DMA_MODE);
- priv->xstats.threshold = tc;
- }
- stmmac_tx_err(priv);
- priv->xstats.tx_undeflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(intr, INFO, "transmit jabber\n");
- priv->xstats.tx_jabber_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(intr, INFO, "recv overflow\n");
- priv->xstats.rx_overflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(intr, INFO, "receive buffer unavailable\n");
- priv->xstats.rx_buf_unav_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(intr, INFO, "receive process stopped\n");
- priv->xstats.rx_process_stopped_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(intr, INFO, "receive watchdog\n");
- priv->xstats.rx_watchdog_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(intr, INFO, "transmit early interrupt\n");
- priv->xstats.tx_early_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(intr, INFO, "transmit process stopped\n");
- priv->xstats.tx_process_stopped_irq++;
- stmmac_tx_err(priv);
- }
- if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(intr, INFO, "fatal bus error\n");
- priv->xstats.fatal_bus_error_irq++;
- stmmac_tx_err(priv);
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ unsigned long ioaddr = priv->dev->base_addr;
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
+ &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
}
- }
-
- /* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
- priv->xstats.normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- _stmmac_schedule(priv);
- }
-
- /* Optional hardware blocks, interrupts should be disabled */
- if (unlikely(intr_status &
- (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
- pr_info("%s: unexpected status %08x\n", __func__, intr_status);
-
- /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
- writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "\n\n");
+ stmmac_tx_err(priv);
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
return;
}
@@ -1058,17 +861,20 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
- priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+ if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
}
/* Copy the MAC addr into the HW */
- priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->bus_setup)
+ priv->bus_setup(ioaddr);
/* Initialize the MAC Core */
- priv->mac_type->ops->core_init(ioaddr);
+ priv->hw->mac->core_init(ioaddr);
priv->shutdown = 0;
@@ -1089,16 +895,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->mac_type->ops->dump_mac_regs(ioaddr);
- priv->mac_type->ops->dump_dma_regs(ioaddr);
+ priv->hw->mac->dump_regs(ioaddr);
+ priv->hw->dma->dump_regs(ioaddr);
}
if (priv->phydev)
@@ -1142,8 +948,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
@@ -1214,8 +1020,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
desc->des2 = dma_map_single(priv->device, skb->data,
BUF_SIZE_8KiB, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1224,16 +1030,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
skb->data + BUF_SIZE_8KiB,
buf2_size, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 0,
- buf2_size, csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
return entry;
}
@@ -1301,8 +1107,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1123,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
frag->page_offset,
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
- priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
- csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
}
/* Interrupt on completion only for the latest segment */
- priv->mac_type->ops->close_tx_desc(desc);
+ priv->hw->desc->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
/* Clean IC while using timer */
if (likely(priv->tm->enable))
- priv->mac_type->ops->clear_tx_ic(desc);
+ priv->hw->desc->clear_tx_ic(desc);
#endif
/* To avoid a race condition */
- priv->mac_type->ops->set_tx_owner(first);
+ priv->hw->desc->set_tx_owner(first);
priv->cur_tx++;
@@ -1353,8 +1158,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* CSR1 enables the transmit DMA to check for new descriptor */
- writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+ priv->hw->dma->enable_dma_transmission(dev->base_addr);
return NETDEV_TX_OK;
}
@@ -1391,7 +1195,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
- priv->mac_type->ops->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p + entry);
}
return;
}
@@ -1412,7 +1216,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
#endif
count = 0;
- while (!priv->mac_type->ops->get_rx_owner(p)) {
+ while (!priv->hw->desc->get_rx_owner(p)) {
int status;
if (count >= limit)
@@ -1425,15 +1229,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
- &priv->xstats, p));
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
if (unlikely(status == discard_frame))
priv->dev->stats.rx_errors++;
else {
struct sk_buff *skb;
/* Length should omit the CRC */
- int frame_len =
- priv->mac_type->ops->get_rx_frame_len(p) - 4;
+ int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1372,7 @@ static void stmmac_multicast_list(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
- priv->mac_type->ops->set_filter(dev);
+ priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
return;
}
@@ -1623,9 +1426,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->is_gmac) {
unsigned long ioaddr = dev->base_addr;
/* To handle GMAC own interrupts */
- priv->mac_type->ops->host_irq_status(ioaddr);
+ priv->hw->mac->host_irq_status(ioaddr);
}
- stmmac_dma_interrupt(dev);
+
+ stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
}
@@ -1744,7 +1548,7 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1779,16 +1583,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
if (priv->is_gmac)
- device = gmac_setup(ioaddr);
+ device = dwmac1000_setup(ioaddr);
else
- device = mac100_setup(ioaddr);
+ device = dwmac100_setup(ioaddr);
if (!device)
return -ENOMEM;
- priv->mac_type = device;
+ priv->hw = device;
- priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ priv->wolenabled = priv->hw->pmt; /* PMT supported */
if (priv->wolenabled == PMT_SUPPORTED)
priv->wolopts = WAKE_MAGIC; /* Magic Frame */
@@ -1797,8 +1601,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
static int stmmacphy_dvr_probe(struct platform_device *pdev)
{
- struct plat_stmmacphy_data *plat_dat;
- plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+ struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
plat_dat->bus_id);
@@ -1830,9 +1633,7 @@ static struct platform_driver stmmacphy_driver = {
static int stmmac_associate_phy(struct device *dev, void *data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat;
-
- plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+ struct plat_stmmacphy_data *plat_dat = dev->platform_data;
DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
plat_dat->bus_id);
@@ -1885,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
pr_info("done!\n");
- if (!request_mem_region(res->start, (res->end - res->start),
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
pr_err("%s: ERROR: memory allocation failed"
"cannot get the I/O addr 0x%x\n",
@@ -1894,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
goto out;
}
- addr = ioremap(res->start, (res->end - res->start));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
- pr_err("%s: ERROR: memory mapping failed \n", __func__);
+ pr_err("%s: ERROR: memory mapping failed\n", __func__);
ret = -ENOMEM;
goto out;
}
@@ -1922,7 +1723,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
- plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1733,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
+ /* Verify embedded resource for the platform */
+ ret = stmmac_claim_resource(pdev);
+ if (ret < 0)
+ goto out;
+
/* MAC HW device detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
@@ -1952,6 +1758,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bus_setup = plat_dat->bus_setup;
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1968,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
out:
if (ret < 0) {
platform_set_drvdata(pdev, NULL);
- release_mem_region(res->start, (res->end - res->start));
+ release_mem_region(res->start, resource_size(res));
if (addr != NULL)
iounmap(addr);
}
@@ -1986,12 +1793,13 @@ out:
static int stmmac_dvr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
- stmmac_dma_stop_rx(ndev->base_addr);
- stmmac_dma_stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(ndev->base_addr);
+ priv->hw->dma->stop_tx(ndev->base_addr);
stmmac_mac_disable_rx(ndev->base_addr);
stmmac_mac_disable_tx(ndev->base_addr);
@@ -2005,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
iounmap((void *)ndev->base_addr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, (res->end - res->start));
+ release_mem_region(res->start, resource_size(res));
free_netdev(ndev);
@@ -2038,21 +1846,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx,
- priv->dma_rx_size, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx,
- priv->dma_tx_size);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
stmmac_mac_disable_tx(dev->base_addr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(dev->base_addr,
+ priv->wolopts);
} else {
stmmac_mac_disable_rx(dev->base_addr);
}
@@ -2093,15 +1900,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from another devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(dev->base_addr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
stmmac_mac_enable_rx(ioaddr);
stmmac_mac_enable_tx(ioaddr);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
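This file carries the bulk of the refactor: every priv->mac_type->ops-> call becomes a call through one of three smaller operation tables reached via priv->hw. The exact structures live in headers not shown in this section, so the sketch below only collects the function pointers that the call sites above imply; the signatures are inferred from the arguments passed and should be treated as assumptions.

/* Rough shape of the split operation tables behind priv->hw. */
struct sketch_mac_ops {			/* priv->hw->mac->... */
	void (*core_init)(unsigned long ioaddr);
	void (*set_filter)(struct net_device *dev);
	void (*flow_ctrl)(unsigned long ioaddr, unsigned int duplex,
			  unsigned int fc, unsigned int pause_time);
	void (*pmt)(unsigned long ioaddr, unsigned long mode);
	void (*set_umac_addr)(unsigned long ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	void (*get_umac_addr)(unsigned long ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	void (*host_irq_status)(unsigned long ioaddr);
	void (*dump_regs)(unsigned long ioaddr);
};

struct sketch_dma_ops {			/* priv->hw->dma->... */
	int (*init)(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
	void (*dma_mode)(unsigned long ioaddr, int txmode, int rxmode);
	void (*start_tx)(unsigned long ioaddr);
	void (*stop_tx)(unsigned long ioaddr);
	void (*start_rx)(unsigned long ioaddr);
	void (*stop_rx)(unsigned long ioaddr);
	void (*enable_dma_irq)(unsigned long ioaddr);
	void (*disable_dma_irq)(unsigned long ioaddr);
	void (*enable_dma_transmission)(unsigned long ioaddr);
	int (*dma_interrupt)(unsigned long ioaddr,
			     struct stmmac_extra_stats *x);
};

struct sketch_desc_ops {		/* priv->hw->desc->... */
	void (*init_rx_desc)(struct dma_desc *p, unsigned int size, int dis_ic);
	void (*init_tx_desc)(struct dma_desc *p, unsigned int size);
	int (*get_tx_owner)(struct dma_desc *p);
	void (*set_tx_owner)(struct dma_desc *p);
	void (*set_rx_owner)(struct dma_desc *p);
	int (*get_rx_owner)(struct dma_desc *p);
	void (*release_tx_desc)(struct dma_desc *p);
};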
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22fc..40b2c7929719 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,9 +24,9 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/slab.h>
#include "stmmac.h"
@@ -48,8 +48,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +80,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +112,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
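The MDIO accessors above build the GMII address register value by packing the PHY address into bits 15:11 and the register number into bits 10:6; the masks below are copied from the hunk, while the surrounding helper and example values are illustrative.

#include <stdio.h>

/* Pack phyaddr into bits 15:11 and phyreg into bits 10:6, as the
 * stmmac_mdio read/write paths do before OR-ing in the busy/write flags.
 */
static unsigned short mii_frame(unsigned int phyaddr, unsigned int phyreg)
{
	return ((phyaddr << 11) & 0x0000F800) |
	       ((phyreg << 6) & 0x000007C0);
}

int main(void)
{
	printf("0x%04x\n", mii_frame(1, 2));	/* PHY 1, reg 2 -> 0x0880 */
	return 0;
}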
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index b447a8719427..8b28c89a9a77 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -33,7 +33,6 @@ static int fifo=0x8; /* don't change */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -413,8 +412,8 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi=dev->mc_list;
- int num_addrs=dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,8 +535,10 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = swab16(num_addrs * 6);
- for(i=0;i<num_addrs;i++,dmi=dmi->next)
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ dmi->dmi_addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
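This is the first of many conversions in this section from the open-coded dev->mc_list / dev->mc_count walk to the netdev_mc_count()/netdev_for_each_mc_addr() helpers. For reference, a sketch of the new idiom as the hunks below use it; program_filter_entry() is a hypothetical per-driver hook.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical hardware hook: write 'addr' into filter slot 'idx'. */
static void program_filter_entry(int idx, const u8 *addr)
{
}

static void example_load_mc_filter(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	int i = 0;

	if (netdev_mc_empty(dev))
		return;		/* only the station address to program */

	pr_debug("%d multicast addresses\n", netdev_mc_count(dev));

	netdev_for_each_mc_addr(dmi, dev) {
		/* dmi->dmi_addr holds the ETH_ALEN-byte group address */
		program_filter_entry(i++, dmi->dmi_addr);
	}
}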
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 0ca4241b4f63..1694ca5bfb41 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -28,7 +28,6 @@ static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.ne
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -917,7 +916,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 25e81ebd9cd8..ed7865a0b5b2 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -25,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/gfp.h>
#include <asm/auxio.h>
#include <asm/byteorder.h>
@@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i;
u32 tmp, crc;
@@ -1013,7 +1013,7 @@ static void bigmac_set_multicast(struct net_device *dev)
while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
udelay(20);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writel(0xffff, bregs + BMAC_HTABLE0);
sbus_writel(0xffff, bregs + BMAC_HTABLE1);
sbus_writel(0xffff, bregs + BMAC_HTABLE2);
@@ -1028,9 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca60..8249a394a4e1 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -84,7 +84,6 @@ static char *media[MAX_UNITS];
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -206,7 +205,7 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static const struct pci_device_id sundance_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
@@ -1517,19 +1516,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
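Several of the drivers touched below (sunbmac, sungem, sunhme, sunlance, sunqe) share the pattern visible in this hunk: the hash-filter bit is derived from the Ethernet CRC of the multicast address, bit-reversing the top six CRC bits into a 0..63 index. A compact sketch of that computation, lifted out of the driver context:

#include <linux/crc32.h>		/* ether_crc_le() */
#include <linux/etherdevice.h>

/* Map a multicast address to a 0..63 hash index by bit-reversing the
 * top six bits of the little-endian Ethernet CRC, as in the loop above;
 * the driver then sets the corresponding bit of its hash-table words.
 */
static int example_mc_hash_index(const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);
	int index = 0, bit;

	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;

	return index;
}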
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab9..e6880f1c4e8c 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -39,7 +39,6 @@
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -58,6 +57,7 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/mm.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -782,7 +782,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
break;
/* When writing back RX descriptor, GEM writes status
- * then buffer address, possibly in seperate transactions.
+ * then buffer address, possibly in separate transactions.
* If we don't wait for the chip to write both, we could
* post a new buffer to this descriptor then have GEM spam
* on the buffer address. We sync on the RX completion
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
int i;
if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
+ (netdev_mc_count(gp->dev) > 256)) {
for (i=0; i<16; i++)
writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
rxcfg |= MAC_RXCFG_HFE;
@@ -1846,17 +1846,13 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi = gp->dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
- for (i = 0; i < 16; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < gp->dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, gp->dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
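Another recurring mechanical change in this section (sundance, sungem, sunhme, tc35815, tehuti) is declaring the PCI ID tables with DEFINE_PCI_DEVICE_TABLE, which supplies the const qualifier and section placement in one place. A sketch of the pattern for a hypothetical driver, reusing the Sun GEM IDs that appear in the diff:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);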
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8a..b17dbb11bd67 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1516,24 +1516,20 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
- (hp->dev->mc_count > 64)) {
+ (netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi = hp->dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < hp->dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, hp->dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
@@ -2366,14 +2362,13 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
spin_lock_irq(&hp->happy_lock);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -2384,12 +2379,9 @@ static void happy_meal_set_multicast(struct net_device *dev)
} else {
u16 hash_table[4];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
@@ -3211,7 +3203,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id happymeal_pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 64e7d08c878f..0c21653ff9f9 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -78,7 +78,6 @@ static char lancestr[] = "LANCE";
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -94,6 +93,7 @@ static char lancestr[] = "LANCE";
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -1170,9 +1170,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
u32 val;
@@ -1196,9 +1195,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 45c383f285ee..be637dce944c 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -636,7 +636,7 @@ static void qe_set_multicast(struct net_device *dev)
/* Lock out others. */
netif_stop_queue(dev);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
@@ -650,12 +650,9 @@ static void qe_set_multicast(struct net_device *dev)
u16 hash_table[4];
u8 *hbytes = (unsigned char *) &hash_table[0];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f3..6b1b7cea7f6b 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -765,7 +765,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
struct dev_addr_list *p;
- for (p = dev->mc_list; p; p = p->next) {
+ netdev_for_each_mc_addr(p, dev) {
struct vnet_mcast_entry *m;
m = __vnet_mc_find(vp, p->dmi_addr);
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+ printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
list_add(&vp->list, &vnet_list);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..49bd84c0d583 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
{ "TOSHIBA TC35815/TX4939" },
};
-static const struct pci_device_id tc35815_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
@@ -402,6 +402,7 @@ struct tc35815_local {
* by this lock as well.
*/
spinlock_t lock;
+ spinlock_t rx_lock;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -835,6 +836,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
INIT_WORK(&lp->restart_work, tc35815_restart_work);
spin_lock_init(&lp->lock);
+ spin_lock_init(&lp->rx_lock);
lp->pci_dev = pdev;
lp->chiptype = ent->driver_data;
@@ -1186,6 +1188,7 @@ static void tc35815_restart(struct net_device *dev)
printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
}
+ spin_lock_bh(&lp->rx_lock);
spin_lock_irq(&lp->lock);
tc35815_chip_reset(dev);
tc35815_clear_queues(dev);
@@ -1193,6 +1196,7 @@ static void tc35815_restart(struct net_device *dev)
/* Reconfigure CAM again since tc35815_chip_init() initialize it. */
tc35815_set_multicast_list(dev);
spin_unlock_irq(&lp->lock);
+ spin_unlock_bh(&lp->rx_lock);
netif_wake_queue(dev);
}
@@ -1211,11 +1215,14 @@ static void tc35815_schedule_restart(struct net_device *dev)
struct tc35815_local *lp = netdev_priv(dev);
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long flags;
/* disable interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
tc_writel(0, &tr->Int_En);
tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
schedule_work(&lp->restart_work);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static void tc35815_tx_timeout(struct net_device *dev)
@@ -1436,8 +1443,9 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
if (status & Int_IntMacTx) {
/* Transmit complete. */
lp->lstats.tx_ints++;
+ spin_lock_irq(&lp->lock);
tc35815_txdone(dev);
- netif_wake_queue(dev);
+ spin_unlock_irq(&lp->lock);
if (ret < 0)
ret = 0;
}
@@ -1650,7 +1658,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
int received = 0, handled;
u32 status;
- spin_lock(&lp->lock);
+ spin_lock(&lp->rx_lock);
status = tc_readl(&tr->Int_Src);
do {
/* BLEx, FDAEx will be cleared later */
@@ -1668,7 +1676,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
}
status = tc_readl(&tr->Int_Src);
} while (status);
- spin_unlock(&lp->lock);
+ spin_unlock(&lp->rx_lock);
if (received < budget) {
napi_complete(napi);
@@ -1941,23 +1949,23 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > CAM_ENTRY_MAX - 3) {
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
- } else if (dev->mc_count) {
- struct dev_mc_list *cur_addr = dev->mc_list;
+ } else if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *cur_addr;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
- if (!cur_addr)
- break;
+ i = 0;
+ netdev_for_each_mc_addr(cur_addr, dev) {
/* entry 0,1 is reserved. */
tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
ena_bits |= CAM_Ena_Bit(i + 2);
+ i++;
}
tc_writel(ena_bits, &tr->CAM_Ena);
tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
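The tc35815 changes add a second spinlock so the NAPI receive path no longer contends on lp->lock with the transmit and reconfiguration paths. A reduced sketch of the resulting locking discipline, with the lock names taken from the hunks above and the bodies collapsed to comments:

#include <linux/spinlock.h>

struct example_local {
	spinlock_t lock;	/* tx / config path, taken irq-safe */
	spinlock_t rx_lock;	/* rx path, taken from NAPI poll (softirq) */
};

/* NAPI poll: only the rx side needs serializing. */
static int example_poll_body(struct example_local *lp)
{
	int received = 0;

	spin_lock(&lp->rx_lock);
	/* ... read Int_Src, receive up to the budget ... */
	spin_unlock(&lp->rx_lock);
	return received;
}

/* Chip restart: exclude the NAPI rx path (_bh, since poll runs in
 * softirq context) and then the tx/irq path.
 */
static void example_restart_body(struct example_local *lp)
{
	spin_lock_bh(&lp->rx_lock);
	spin_lock_irq(&lp->lock);
	/* ... chip reset, queue clear, CAM filter reload ... */
	spin_unlock_irq(&lp->lock);
	spin_unlock_bh(&lp->rx_lock);
}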
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b938..f5493092521a 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -62,9 +62,11 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "tehuti.h"
-static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -105,26 +107,24 @@ static void print_hw_id(struct pci_dev *pdev)
pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
- printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME,
- nic->port_num == 1 ? "" : ", 2-Port");
- printk(KERN_INFO
- "tehuti: srom 0x%x fpga %d build %u lane# %d"
- " max_pl 0x%x mrrs 0x%x\n",
- readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
- readl(nic->regs + FPGA_SEED),
- GET_LINK_STATUS_LANES(pci_link_status),
- GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
+ pr_info("%s%s\n", BDX_NIC_NAME,
+ nic->port_num == 1 ? "" : ", 2-Port");
+ pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
+ readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
+ readl(nic->regs + FPGA_SEED),
+ GET_LINK_STATUS_LANES(pci_link_status),
+ GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
static void print_fw_id(struct pci_nic *nic)
{
- printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER));
+ pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}
static void print_eth_id(struct net_device *ndev)
{
- printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME,
- (ndev->if_port == 0) ? 'A' : 'B');
+ netdev_info(ndev, "%s, Port %c\n",
+ BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}
@@ -160,7 +160,7 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
f->va = pci_alloc_consistent(priv->pdev,
memsz + FIFO_EXTRA_SPACE, &f->da);
if (!f->va) {
- ERR("pci_alloc_consistent failed\n");
+ pr_err("pci_alloc_consistent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -204,13 +204,13 @@ static void bdx_link_changed(struct bdx_priv *priv)
if (netif_carrier_ok(priv->ndev)) {
netif_stop_queue(priv->ndev);
netif_carrier_off(priv->ndev);
- ERR("%s: Link Down\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Down\n");
}
} else {
if (!netif_carrier_ok(priv->ndev)) {
netif_wake_queue(priv->ndev);
netif_carrier_on(priv->ndev);
- ERR("%s: Link Up\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Up\n");
}
}
}
@@ -226,10 +226,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
bdx_link_changed(priv);
if (isr & IR_PCIE_LINK)
- ERR("%s: PCI-E Link Fault\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Link Fault\n");
if (isr & IR_PCIE_TOUT)
- ERR("%s: PCI-E Time Out\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Time Out\n");
}
@@ -345,7 +345,7 @@ out:
release_firmware(fw);
if (rc) {
- ERR("%s: firmware loading failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "firmware loading failed\n");
if (rc == -EIO)
DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
READ_REG(priv, regVPC),
@@ -419,9 +419,11 @@ static int bdx_hw_start(struct bdx_priv *priv)
WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
-#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
- if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
- ndev->name, ndev)))
+#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
+
+ rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
+ ndev->name, ndev);
+ if (rc)
goto err_irq;
bdx_enable_interrupts(priv);
@@ -462,7 +464,7 @@ static int bdx_hw_reset_direct(void __iomem *regs)
readl(regs + regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -486,7 +488,7 @@ static int bdx_hw_reset(struct bdx_priv *priv)
READ_REG(priv, regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -510,8 +512,7 @@ static int bdx_sw_reset(struct bdx_priv *priv)
mdelay(10);
}
if (i == 50)
- ERR("%s: SW reset timeout. continuing anyway\n",
- priv->ndev->name);
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
/* 6. disable intrs */
WRITE_REG(priv, regRDINTCM0, 0);
@@ -604,18 +605,15 @@ static int bdx_open(struct net_device *ndev)
if (netif_running(ndev))
netif_stop_queue(priv->ndev);
- if ((rc = bdx_tx_init(priv)))
- goto err;
-
- if ((rc = bdx_rx_init(priv)))
- goto err;
-
- if ((rc = bdx_fw_load(priv)))
+ if ((rc = bdx_tx_init(priv)) ||
+ (rc = bdx_rx_init(priv)) ||
+ (rc = bdx_fw_load(priv)))
goto err;
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
- if ((rc = bdx_hw_start(priv)))
+ rc = bdx_hw_start(priv);
+ if (rc)
goto err;
napi_enable(&priv->napi);
@@ -647,7 +645,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE) {
error = copy_from_user(data, ifr->ifr_data, sizeof(data));
if (error) {
- ERR("cant copy from user\n");
+ pr_err("cant copy from user\n");
RET(error);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@ -708,7 +706,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
ENTER;
DBG2("vid=%d value=%d\n", (int)vid, enable);
if (unlikely(vid >= 4096)) {
- ERR("tehuti: invalid VID: %u (> 4096)\n", vid);
+ pr_err("invalid VID: %u (> 4096)\n", vid);
RET();
}
reg = regVLAN_0 + (vid / 32) * 4;
@@ -776,8 +774,8 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
/* enforce minimum frame size */
if (new_mtu < ETH_ZLEN) {
- ERR("%s: %s mtu %d is less then minimal %d\n",
- BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN);
+ netdev_err(ndev, "mtu %d is less then minimal %d\n",
+ new_mtu, ETH_ZLEN);
RET(-EINVAL);
}
@@ -808,7 +806,7 @@ static void bdx_setmulti(struct net_device *ndev)
/* set IMF to accept all multicast frames */
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
u8 hash;
struct dev_mc_list *mclist;
u32 reg, val;
@@ -826,10 +824,8 @@ static void bdx_setmulti(struct net_device *ndev)
/* TBD: sort addresses and write them in ascending order
 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
 * multicast frames through IMF */
- mclist = ndev->mc_list;
-
/* accept the rest of addresses through IMF */
- for (; mclist; mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
hash ^= mclist->dmi_addr[i];
@@ -840,7 +836,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
} else {
- DBG("only own mac %d\n", ndev->mc_count);
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
rxf_val |= GMAC_RX_FILTER_AB;
}
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
@@ -1028,17 +1024,16 @@ static int bdx_rx_init(struct bdx_priv *priv)
regRXF_CFG0_0, regRXF_CFG1_0,
regRXF_RPTR_0, regRXF_WPTR_0))
goto err_mem;
- if (!
- (priv->rxdb =
- bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
- sizeof(struct rxf_desc))))
+ priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
+ sizeof(struct rxf_desc));
+ if (!priv->rxdb)
goto err_mem;
priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
return 0;
err_mem:
- ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name);
+ netdev_err(priv->ndev, "Rx init failed\n");
return -ENOMEM;
}
@@ -1115,8 +1110,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
ENTER;
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
- if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) {
- ERR("NO MEM: dev_alloc_skb failed\n");
+ skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err("NO MEM: dev_alloc_skb failed\n");
break;
}
skb->dev = priv->ndev;
@@ -1337,9 +1333,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
u16 rxd_vlan)
{
- DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d "
- "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
- "va_lo %d va_hi %d\n",
+ DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
@@ -1591,7 +1585,7 @@ static int bdx_tx_init(struct bdx_priv *priv)
return 0;
err_mem:
- ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Tx init failed\n");
return -ENOMEM;
}
@@ -1609,7 +1603,7 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
fsize = f->m.rptr - f->m.wptr;
if (fsize <= 0)
fsize = f->m.memsz + fsize;
- return (fsize);
+ return fsize;
}
/* bdx_tx_transmit - send packet to NIC
@@ -1857,7 +1851,7 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
* @data - desc's data
* @size - desc's size
*
- * NOTE: this func does check for available space and, if neccessary, waits for
+ * NOTE: this func does check for available space and, if necessary, waits for
* NIC to read existing data before writing new one.
*/
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
@@ -1937,8 +1931,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
RET(-ENOMEM);
/************** pci *****************/
- if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
- goto err_pci; /* it's not a problem though */
+ err = pci_enable_device(pdev);
+ if (err) /* it triggers interrupt, dunno why. */
+ goto err_pci; /* it's not a problem though */
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
@@ -1946,14 +1941,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR "tehuti: No usable DMA configuration"
- ", aborting\n");
+ pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
pci_using_dac = 0;
}
- if ((err = pci_request_regions(pdev, BDX_DRV_NAME)))
+ err = pci_request_regions(pdev, BDX_DRV_NAME);
+ if (err)
goto err_dma;
pci_set_master(pdev);
@@ -1961,25 +1956,26 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pciaddr = pci_resource_start(pdev, 0);
if (!pciaddr) {
err = -EIO;
- ERR("tehuti: no MMIO resource\n");
+ pr_err("no MMIO resource\n");
goto err_out_res;
}
- if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) {
+ regionSize = pci_resource_len(pdev, 0);
+ if (regionSize < BDX_REGS_SIZE) {
err = -EIO;
- ERR("tehuti: MMIO resource (%x) too small\n", regionSize);
+ pr_err("MMIO resource (%x) too small\n", regionSize);
goto err_out_res;
}
nic->regs = ioremap(pciaddr, regionSize);
if (!nic->regs) {
err = -EIO;
- ERR("tehuti: ioremap failed\n");
+ pr_err("ioremap failed\n");
goto err_out_res;
}
if (pdev->irq < 2) {
err = -EIO;
- ERR("tehuti: invalid irq (%d)\n", pdev->irq);
+ pr_err("invalid irq (%d)\n", pdev->irq);
goto err_out_iomap;
}
pci_set_drvdata(pdev, nic);
@@ -1996,8 +1992,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
- if ((err = pci_enable_msi(pdev)))
- ERR("Tehuti: Can't eneble msi. error is %d\n", err);
+ err = pci_enable_msi(pdev);
+ if (err)
+ pr_err("Can't enable msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
@@ -2006,9 +2003,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** netdev **************/
for (port = 0; port < nic->port_num; port++) {
- if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) {
+ ndev = alloc_etherdev(sizeof(struct bdx_priv));
+ if (!ndev) {
err = -ENOMEM;
- printk(KERN_ERR "tehuti: alloc_etherdev failed\n");
+ pr_err("alloc_etherdev failed\n");
goto err_out_iomap;
}
@@ -2075,12 +2073,13 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*bdx_hw_reset(priv); */
if (bdx_read_mac(priv)) {
- printk(KERN_ERR "tehuti: load MAC address failed\n");
+ pr_err("load MAC address failed\n");
goto err_out_iomap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
- if ((err = register_netdev(ndev))) {
- printk(KERN_ERR "tehuti: register_netdev failed\n");
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("register_netdev failed\n");
goto err_out_free;
}
netif_carrier_off(ndev);
@@ -2294,13 +2293,13 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
/* Convert RX fifo size to number of pending packets */
static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
- return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc));
+ return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}
/* Convert TX fifo size to number of pending packets */
static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
- return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ);
+ return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}
/*
@@ -2392,10 +2391,10 @@ static int bdx_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_STATS:
BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
!= sizeof(struct bdx_stats) / sizeof(u64));
- return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
- default:
- return -EINVAL;
+ return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
}
+
+ return -EINVAL;
}
/*
@@ -2493,10 +2492,8 @@ static struct pci_driver bdx_pci_driver = {
*/
static void __init print_driver_id(void)
{
- printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC,
- BDX_DRV_VERSION);
- printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
- BDX_MSI_STRING);
+ pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
+ pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}
static int __init bdx_module_init(void)
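Dropping the hard-coded "tehuti: " prefixes throughout these hunks only works if the file defines pr_fmt() before its includes; that definition sits outside the quoted context, but it presumably follows the standard idiom:

/* Must come before any #include so pr_err()/pr_info() pick up the prefix. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>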
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 124141909e42..cff98d07cba8 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -32,6 +32,7 @@
#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
/* Compile Time Switches */
/* start */
@@ -529,28 +530,34 @@ struct txd_desc {
/* Debugging Macros */
-#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
-#define DBG2(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG2(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#define BDX_ASSERT(x) BUG_ON(x)
#ifdef DEBUG
-#define ENTER do { \
- printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
+#define ENTER \
+do { \
+ pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
} while (0)
-#define RET(args...) do { \
- printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
-return args; } while (0)
+#define RET(args...) \
+do { \
+ pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
+ return args; \
+} while (0)
-#define DBG(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#else
-#define ENTER do { } while (0)
+#define ENTER do { } while (0)
#define RET(args...) return args
-#define DBG(fmt, args...) do { } while (0)
+#define DBG(fmt, args...) \
+do { \
+ if (0) \
+ pr_err(fmt, ##args); \
+} while (0)
#endif
#endif /* _BDX__H */
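The non-DEBUG DBG() now uses the if (0) idiom: the compiler still type-checks the format string against its arguments and then discards the whole call as dead code, so disabled debug statements cannot bit-rot. Shown in isolation:

#include <linux/kernel.h>

#define EXAMPLE_DBG(fmt, args...)		\
do {						\
	if (0)					\
		pr_err(fmt, ##args);		\
} while (0)

static void example_user(int rc)
{
	/* Generates no code, but "%d" vs. rc is still verified at build time. */
	EXAMPLE_DBG("rc = %d\n", rc);
}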
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3a74d2168598..ecc41cffb470 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2009 Broadcom Corporation.
+ * Copyright (C) 2005-2010 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -67,9 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.105"
-#define DRV_MODULE_RELDATE "December 2, 2009"
+#define DRV_MODULE_VERSION "3.108"
+#define DRV_MODULE_RELDATE "February 17, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -158,7 +157,7 @@
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
static char version[] __devinitdata =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
@@ -174,7 +173,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
-static struct pci_device_id tg3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +243,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
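DEFINE_PCI_DEVICE_TABLE() declares the ID table as a const struct pci_device_id array in the appropriate section; usage alongside MODULE_DEVICE_TABLE() is otherwise unchanged. A minimal sketch with a made-up device ID:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x1234) },	/* hypothetical ID */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);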
@@ -636,7 +641,6 @@ static void tg3_disable_ints(struct tg3 *tp)
static void tg3_enable_ints(struct tg3 *tp)
{
int i;
- u32 coal_now = 0;
tp->irq_sync = 0;
wmb();
@@ -644,13 +648,14 @@ static void tg3_enable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- coal_now |= tnapi->coal_now;
+ tp->coal_now |= tnapi->coal_now;
}
/* Force an initial interrupt */
@@ -658,8 +663,9 @@ static void tg3_enable_ints(struct tg3 *tp)
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
- tw32(HOSTCC_MODE, tp->coalesce_mode |
- HOSTCC_MODE_ENABLE | coal_now);
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
@@ -948,17 +954,17 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case TG3_PHY_ID_BCM50610:
- case TG3_PHY_ID_BCM50610M:
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
val = MAC_PHYCFG2_50610_LED_MODES;
break;
- case TG3_PHY_ID_BCMAC131:
+ case PHY_ID_BCMAC131:
val = MAC_PHYCFG2_AC131_LED_MODES;
break;
- case TG3_PHY_ID_RTL8211C:
+ case PHY_ID_RTL8211C:
val = MAC_PHYCFG2_RTL8211C_LED_MODES;
break;
- case TG3_PHY_ID_RTL8201E:
+ case PHY_ID_RTL8201E:
val = MAC_PHYCFG2_RTL8201E_LED_MODES;
break;
default:
@@ -977,7 +983,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
return;
}
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
val |= MAC_PHYCFG2_EMODE_MASK_MASK |
MAC_PHYCFG2_FMODE_MASK_MASK |
MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -990,7 +996,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
@@ -1008,7 +1014,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
@@ -1028,6 +1034,17 @@ static void tg3_mdio_start(struct tg3 *tp)
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
+ if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+ int i;
+ u32 reg;
+ struct phy_device *phydev;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
u32 funcnum, is_serdes;
@@ -1037,23 +1054,16 @@ static void tg3_mdio_start(struct tg3 *tp)
else
tp->phy_addr = 1;
- is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
- if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tg3_mdio_config_5785(tp);
-}
-
-static int tg3_mdio_init(struct tg3 *tp)
-{
- int i;
- u32 reg;
- struct phy_device *phydev;
-
tg3_mdio_start(tp);
if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
@@ -1088,8 +1098,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
- tp->dev->name, i);
+ netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1097,35 +1106,35 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
+ netdev_warn(tp->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
}
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case TG3_PHY_ID_BCM57780:
+ case PHY_ID_BCM57780:
phydev->interface = PHY_INTERFACE_MODE_GMII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
break;
- case TG3_PHY_ID_BCM50610:
- case TG3_PHY_ID_BCM50610M:
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
+ if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
/* fallthru */
- case TG3_PHY_ID_RTL8211C:
+ case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
- case TG3_PHY_ID_RTL8201E:
- case TG3_PHY_ID_BCMAC131:
+ case PHY_ID_RTL8201E:
+ case PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
@@ -1241,27 +1250,22 @@ static void tg3_ump_link_report(struct tg3 *tp)
static void tg3_link_report(struct tg3 *tp)
{
if (!netif_carrier_ok(tp->dev)) {
- if (netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: Link is down.\n",
- tp->dev->name);
+ netif_info(tp, link, tp->dev, "Link is down\n");
tg3_ump_link_report(tp);
} else if (netif_msg_link(tp)) {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- tp->dev->name,
- (tp->link_config.active_speed == SPEED_1000 ?
- 1000 :
- (tp->link_config.active_speed == SPEED_100 ?
- 100 : 10)),
- (tp->link_config.active_duplex == DUPLEX_FULL ?
- "full" : "half"));
-
- printk(KERN_INFO PFX
- "%s: Flow control is %s for TX and %s for RX.\n",
- tp->dev->name,
- (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
- "on" : "off",
- (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
- "on" : "off");
+ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+ "on" : "off");
tg3_ump_link_report(tp);
}
}
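netif_info() folds the netif_msg_link() test and the device-prefixed print into one call, while netdev_info() prints unconditionally. A sketch of the pattern, assuming a private struct with the usual msg_enable field:

#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;			/* bitmask read by netif_msg_*() */
};

static void example_report_link(struct example_priv *priv,
				struct net_device *dev)
{
	if (!netif_carrier_ok(dev)) {
		/* Printed only when the NETIF_MSG_LINK bit is enabled. */
		netif_info(priv, link, dev, "Link is down\n");
		return;
	}

	netdev_info(dev, "Link is up\n");	/* always printed */
}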
@@ -1460,7 +1464,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
+ netdev_err(tp->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1560,7 +1564,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1935,6 +1941,10 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+ return 0;
+
tg3_phy_apply_otp(tp);
if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -1978,7 +1988,7 @@ out:
}
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
@@ -2015,7 +2025,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
{
struct tg3 *tp_peer = tp;
- if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
+ /* The GPIOs do something completely different on 57765. */
+ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -2128,7 +2140,7 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
return 1;
- else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
+ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
if (speed != SPEED_10)
return 1;
} else if (speed == SPEED_10)
@@ -2481,8 +2493,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
break;
default:
- printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
- tp->dev->name, state);
+ netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
+ state);
return -EINVAL;
}
@@ -2544,11 +2556,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
- if (phyid != TG3_PHY_ID_BCMAC131) {
- phyid &= TG3_PHY_OUI_MASK;
- if (phyid == TG3_PHY_OUI_1 ||
- phyid == TG3_PHY_OUI_2 ||
- phyid == TG3_PHY_OUI_3)
+ if (phyid != PHY_ID_BCMAC131) {
+ phyid &= PHY_BCM_OUI_MASK;
+ if (phyid == PHY_BCM_OUI_1 ||
+ phyid == PHY_BCM_OUI_2 ||
+ phyid == PHY_BCM_OUI_3)
do_low_power = true;
}
}
@@ -3058,7 +3070,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (force_reset)
tg3_phy_reset(tp);
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
@@ -3079,7 +3091,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
}
- if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
+ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+ TG3_PHY_REV_BCM5401_B0 &&
!(bmsr & BMSR_LSTATUS) &&
tp->link_config.active_speed == SPEED_1000) {
err = tg3_phy_reset(tp);
@@ -3234,7 +3247,7 @@ relink:
/* ??? Without this setting Netgear GA302T PHY does not
* ??? send/receive packets...
*/
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
@@ -3949,7 +3962,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
- if (tp->phy_id == PHY_ID_BCM8002)
+ if (tp->phy_id == TG3_PHY_ID_BCM8002)
tg3_init_bcm8002(tp);
/* Enable link change event even when serdes polling. */
@@ -4322,10 +4335,8 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
- "mapped I/O cycles to the network device, attempting to "
- "recover. Please report the problem to the driver maintainer "
- "and include system chipset information.\n", tp->dev->name);
+ netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
+ "Please report the problem to the driver maintainer and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4534,6 +4545,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
pci_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
src_map->skb = NULL;
}
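The smp_wmb() added here pairs with the smp_rmb() calls added below in tg3_rx_prodring_xfer(): the writer must publish the descriptor contents before marking the slot free, and the reader must observe the free marker before trusting those contents. Reduced to a generic sketch:

#include <linux/kernel.h>
#include <asm/system.h>		/* smp_wmb()/smp_rmb() in this kernel generation */

struct example_slot {
	void *skb;		/* NULL means "slot free, payload valid" */
	u64 addr;		/* payload protected by the ordering */
};

static void example_publish(struct example_slot *slot, u64 payload)
{
	slot->addr = payload;	/* 1. write the payload */
	smp_wmb();		/* 2. order it before the free marker */
	slot->skb = NULL;	/* 3. mark the slot consumable */
}

static bool example_consume(struct example_slot *slot, u64 *out)
{
	if (slot->skb)		/* still owned by the writer */
		return false;
	smp_rmb();		/* pairs with the smp_wmb() above */
	*out = slot->addr;	/* the payload write is now visible */
	return true;
}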
@@ -4634,11 +4651,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (skb_size < 0)
goto drop_it;
- ri->skb = NULL;
-
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
+ /* Ensure that the update to the skb happens
+ * after the usage of the old DMA mapping.
+ */
+ smp_wmb();
+
+ ri->skb = NULL;
+
skb_put(skb, len);
} else {
struct sk_buff *copy_skb;
@@ -4693,8 +4715,9 @@ next_pkt:
(*post_ptr)++;
if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
- u32 idx = *post_ptr % TG3_RX_RING_SIZE;
- tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
+ tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
work_mask &= ~RXD_OPAQUE_RING_STD;
rx_std_posted = 0;
}
@@ -4714,7 +4737,7 @@ next_pkt_nopost:
tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
@@ -4736,7 +4759,8 @@ next_pkt_nopost:
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
- napi_schedule(&tp->napi[1].napi);
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
}
return received;
@@ -4768,12 +4792,12 @@ static void tg3_poll_link(struct tg3 *tp)
}
}
-static void tg3_rx_prodring_xfer(struct tg3 *tp,
- struct tg3_rx_prodring_set *dpr,
- struct tg3_rx_prodring_set *spr)
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
{
u32 si, di, cpycnt, src_prod_idx;
- int i;
+ int i, err = 0;
while (1) {
src_prod_idx = spr->rx_std_prod_idx;
@@ -4796,6 +4820,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_std_cons_idx;
di = dpr->rx_std_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_std_buffers[di],
&spr->rx_std_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4836,6 +4877,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_jmb_cons_idx;
di = dpr->rx_jmb_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_jmb_buffers[di],
&spr->rx_jmb_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4853,6 +4911,8 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
TG3_RX_JUMBO_RING_SIZE;
}
+
+ return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
@@ -4874,27 +4934,29 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- int i;
- u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
- u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+ struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
- for (i = 2; i < tp->irq_cnt; i++)
- tg3_rx_prodring_xfer(tp, tnapi->prodring,
- tp->napi[i].prodring);
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ tp->napi[i].prodring);
wmb();
- if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
- u32 mbox = TG3_RX_STD_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
- }
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
- if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
- u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
- }
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
}
return work_done;
@@ -5198,8 +5260,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
- "aborting.\n", tp->dev->name);
+ netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5218,7 +5279,7 @@ static void tg3_poll_controller(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
for (i = 0; i < tp->irq_cnt; i++)
- tg3_interrupt(tp->napi[i].irq_vec, dev);
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
@@ -5272,10 +5333,10 @@ out:
static void tg3_dump_short_state(struct tg3 *tp)
{
- printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
- printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
- tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
+ netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
+ tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
+ netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
+ tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
static void tg3_tx_timeout(struct net_device *dev)
@@ -5283,8 +5344,7 @@ static void tg3_tx_timeout(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
if (netif_msg_tx_err(tp)) {
- printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tg3_dump_short_state(tp);
}
@@ -5448,8 +5508,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5652,8 +5711,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -6000,11 +6058,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX standard ring, "
- "only %d out of %d buffers were allocated "
- "successfully.\n",
- tp->dev->name, i, tp->rx_pending);
+ netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6017,31 +6072,28 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
- struct tg3_rx_buffer_desc *rxd;
+ if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
+ goto done;
- rxd = &tpr->rx_jmb[i].std;
- rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
- rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
- RXD_FLAG_JUMBO;
- rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
- (i << RXD_OPAQUE_INDEX_SHIFT));
- }
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ struct tg3_rx_buffer_desc *rxd;
- for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
- i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX jumbo ring, "
- "only %d out of %d buffers were "
- "allocated successfully.\n",
- tp->dev->name, i, tp->rx_jumbo_pending);
- if (i == 0)
- goto initfail;
- tp->rx_jumbo_pending = i;
- break;
- }
+ rxd = &tpr->rx_jmb[i].std;
+ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ for (i = 0; i < tp->rx_jumbo_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_jumbo_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_jumbo_pending = i;
+ break;
}
}
@@ -6154,8 +6206,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
- if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tp->prodring[j]);
}
}
@@ -6191,9 +6242,10 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
- tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+ if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ tg3_free_rings(tp);
return -ENOMEM;
+ }
}
return 0;
@@ -6240,7 +6292,7 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats = NULL;
}
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+ for (i = 0; i < tp->irq_cnt; i++)
tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
@@ -6252,7 +6304,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+ for (i = 0; i < tp->irq_cnt; i++) {
if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
goto err_out;
}
@@ -6317,10 +6369,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- if (tp->irq_cnt == 1)
- tnapi->prodring = &tp->prodring[0];
- else if (i)
- tnapi->prodring = &tp->prodring[i - 1];
+ tnapi->prodring = &tp->prodring[i];
/*
* If multivector RSS is enabled, vector 0 does not handle
@@ -6384,8 +6433,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- printk(KERN_ERR PFX "tg3_stop_block timed out, "
- "ofs=%lx enable_bit=%x\n",
+ pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
ofs, enable_bit);
return -ENODEV;
}
@@ -6432,9 +6480,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
- "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- tp->dev->name, tr32(MAC_TX_MODE));
+ netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
+ __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6655,8 +6702,14 @@ static int tg3_poll_fw(struct tg3 *tp)
!(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
- printk(KERN_INFO PFX "%s: No firmware running.\n",
- tp->dev->name);
+ netdev_info(tp->dev, "No firmware running\n");
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
}
return 0;
@@ -7077,10 +7130,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
}
if (i >= 10000) {
- printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
- "and %s CPU\n",
- tp->dev->name,
- (offset == RX_CPU_BASE ? "RX" : "TX"));
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -7105,9 +7156,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
- "TX cpu firmware on %s which is 5705.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
return -EINVAL;
}
@@ -7186,10 +7236,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
- "to set RX CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
@@ -7252,10 +7300,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
- "to set CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(cpu_base + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(cpu_base + CPU_STATE, 0xffffffff);
@@ -7434,10 +7480,13 @@ static void tg3_rings_reset(struct tg3 *tp)
for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
- tw32_mailbox(tp->napi[i].prodmbox, 0);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
}
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
} else {
tp->napi[0].tx_prod = 0;
tp->napi[0].tx_cons = 0;
@@ -7523,8 +7572,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_abort_hw(tp, 1);
}
- if (reset_phy &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -7569,6 +7617,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
+ if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7700,8 +7762,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
if (i >= 2000) {
- printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
return -ENODEV;
}
@@ -7742,7 +7803,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
@@ -7767,7 +7828,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
(RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
BDINFO_FLAGS_USE_EXT_RECV);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -7829,6 +7890,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
@@ -8138,7 +8202,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -8557,16 +8625,15 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
- "switching to INTx mode. Please report this failure to "
- "the PCI maintainer and include system chipset information.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
+ "Please report this failure to the PCI maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ tp->napi[0].irq_vec = tp->pdev->irq;
err = tg3_request_irq(tp, 0);
if (err)
@@ -8593,8 +8660,8 @@ static int tg3_request_firmware(struct tg3 *tp)
const __be32 *fw_data;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
- printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
- tp->dev->name, tp->fw_needed);
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
return -ENOENT;
}
@@ -8607,8 +8674,8 @@ static int tg3_request_firmware(struct tg3 *tp)
tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
if (tp->fw_len < (tp->fw->size - 12)) {
- printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
- tp->dev->name, tp->fw_len, tp->fw_needed);
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
tp->fw = NULL;
return -EINVAL;
@@ -8646,9 +8713,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
return false;
if (pci_enable_msix(tp->pdev, msix_ent, rc))
return false;
- printk(KERN_NOTICE
- "%s: Requested %d MSI-X vectors, received %d\n",
- tp->dev->name, tp->irq_cnt, rc);
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
tp->irq_cnt = rc;
}
@@ -8673,8 +8739,7 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
- "Not using MSI.\n", tp->dev->name);
+ netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
goto defcfg;
}
@@ -8719,12 +8784,10 @@ static int tg3_open(struct net_device *dev)
if (err)
return err;
} else if (err) {
- printk(KERN_WARNING "%s: TSO capability disabled.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "TSO capability disabled\n");
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
- printk(KERN_NOTICE "%s: TSO capability restored.\n",
- tp->dev->name);
+ netdev_notice(tp->dev, "TSO capability restored\n");
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
}
@@ -9390,21 +9453,18 @@ static void __tg3_set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
- } else if (dev->mc_count < 1) {
+ } else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
/* Accept one or more multicast(s). */
struct dev_mc_list *mclist;
- unsigned int i;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
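netdev_for_each_mc_addr() replaces the open-coded dev->mc_list walk; the hash arithmetic is untouched. The same filter computation as a standalone sketch, using the generic ether_crc() helper in place of tg3's local calc_crc():

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void example_build_mc_filter(struct net_device *dev, u32 mc_filter[4])
{
	struct dev_mc_list *mclist;	/* list node type of this kernel generation */

	memset(mc_filter, 0, 4 * sizeof(u32));

	netdev_for_each_mc_addr(mclist, dev) {
		u32 crc = ether_crc(ETH_ALEN, mclist->dmi_addr);
		u32 bit = ~crc & 0x7f;			/* 128-bit hash filter */
		u32 regidx = (bit & 0x60) >> 5;		/* 32-bit word index */

		mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit within the word */
	}
}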
@@ -9717,7 +9777,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
- if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
mask |= ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full;
@@ -9996,56 +10056,66 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
int err = 0;
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
+ u32 newadv;
+ struct phy_device *phydev;
- if (epause->autoneg) {
- u32 newadv;
- struct phy_device *phydev;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ ((epause->rx_pause && !epause->tx_pause) ||
+ (!epause->rx_pause && epause->tx_pause))))
+ return -EINVAL;
- if (epause->rx_pause) {
- if (epause->tx_pause)
- newadv = ADVERTISED_Pause;
- else
- newadv = ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
- } else if (epause->tx_pause) {
- newadv = ADVERTISED_Asym_Pause;
+ tp->link_config.flowctrl = 0;
+ if (epause->rx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+ if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Pause;
} else
- newadv = 0;
-
- if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
- u32 oldadv = phydev->advertising &
- (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- err = phy_start_aneg(phydev);
+ newadv = ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+ u32 oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ /*
+ * Always renegotiate the link to
+ * inform our link partner of our
+ * flow control settings, even if the
+ * flow control is forced. Let
+ * tg3_adjust_link() do the final
+ * flow control setup.
+ */
+ return phy_start_aneg(phydev);
}
- } else {
- tp->link_config.advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- tp->link_config.advertising |= newadv;
}
- } else {
- if (epause->rx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_RX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
-
- if (epause->tx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_TX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
- if (netif_running(dev))
+ if (!epause->autoneg)
tg3_setup_flow_control(tp, 0, 0);
+ } else {
+ tp->link_config.orig_advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ tp->link_config.orig_advertising |= newadv;
}
} else {
int irq_sync = 0;
@@ -10579,8 +10649,7 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- printk(KERN_ERR PFX "Register test failed at offset %x\n",
- offset);
+ pr_err("Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10635,12 +10704,27 @@ static int tg3_test_memory(struct tg3 *tp)
{ 0x00008000, 0x01000},
{ 0x00010000, 0x01000},
{ 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
};
struct mem_entry *mem_tbl;
int err = 0;
int i;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
@@ -10673,12 +10757,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
struct tg3_napi *tnapi, *rnapi;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- tnapi = &tp->napi[1];
rnapi = &tp->napi[1];
- } else {
- tnapi = &tp->napi[0];
- rnapi = &tp->napi[0];
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tnapi = &tp->napi[1];
}
coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10715,8 +10799,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
+ tg3_writephy(tp, MII_TG3_FET_PTEST,
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+ /* The write needs to be flushed for the AC131 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
mac_mode |= MAC_MODE_PORT_MODE_MII;
} else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10728,9 +10816,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+ if (masked_phy_id == TG3_PHY_ID_BCM5401)
mac_mode &= ~MAC_MODE_LINK_POLARITY;
- else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
+ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -11687,8 +11776,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
- "tg3_nvram_init failed.\n", tp->dev->name);
+ netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ __func__);
return;
}
tg3_enable_nvram_access(tp);
@@ -11986,45 +12075,71 @@ struct subsys_tbl_ent {
u32 phy_id;
};
-static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
+static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
/* Broadcom boards. */
- { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
- { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
- { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
- { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
- { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
- { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
- { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
- { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
- { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
- { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
- { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
- { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
- { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
- { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
- { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
- { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
- { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
- { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
- { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
- { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
/* Compaq boards. */
- { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
- { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
- { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
- { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
- { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
- { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
+ { TG3PCI_SUBVENDOR_ID_IBM,
+ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
-static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
+static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
int i;
@@ -12065,7 +12180,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- tp->phy_id = PHY_ID_INVALID;
+ tp->phy_id = TG3_PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* Assume an onboard device and WOL capable by default. */
@@ -12122,7 +12237,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
else
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -12238,8 +12354,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
}
- if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
+ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+ tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
@@ -12315,7 +12431,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
err = 0;
if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
- hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
+ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
} else {
/* Now read the physical PHY_ID from the chip and verify
* that it is sane. If it doesn't look good, we fall back
@@ -12329,17 +12445,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
- hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
+ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
}
- if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
+ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
tp->phy_id = hw_phy_id;
- if (hw_phy_id_masked == PHY_ID_BCM8002)
+ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
else
tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
} else {
- if (tp->phy_id != PHY_ID_INVALID) {
+ if (tp->phy_id != TG3_PHY_ID_INVALID) {
/* Do nothing, phy ID already set up in
* tg3_get_eeprom_hw_cfg().
*/
@@ -12349,13 +12465,13 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
/* No eeprom signature? Try the hardcoded
* subsys device table.
*/
- p = lookup_by_subsys(tp);
+ p = tg3_lookup_by_subsys(tp);
if (!p)
return -ENODEV;
tp->phy_id = p->phy_id;
if (!tp->phy_id ||
- tp->phy_id == PHY_ID_BCM8002)
+ tp->phy_id == TG3_PHY_ID_BCM8002)
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
}
}
@@ -12407,13 +12523,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
}
skip_phy_reset:
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
err = tg3_init_5401phy_dsp(tp);
if (err)
return err;
- }
- if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
err = tg3_init_5401phy_dsp(tp);
}
@@ -12434,7 +12548,8 @@ skip_phy_reset:
static void __devinit tg3_read_partno(struct tg3 *tp)
{
unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
- unsigned int i;
+ unsigned int block_end, rosize, len;
+ int i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12456,7 +12571,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
}
} else {
ssize_t cnt;
- unsigned int pos = 0, i = 0;
+ unsigned int pos = 0;
for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
cnt = pci_read_vpd(tp->pdev, pos,
@@ -12471,51 +12586,33 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
goto out_not_found;
}
- /* Now parse and find the part number. */
- for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
- unsigned char val = vpd_data[i];
- unsigned int block_end;
-
- if (val == 0x82 || val == 0x91) {
- i = (i + 3 +
- (vpd_data[i + 1] +
- (vpd_data[i + 2] << 8)));
- continue;
- }
+ i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
- if (val != 0x90)
- goto out_not_found;
+ rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+ i += PCI_VPD_LRDT_TAG_SIZE;
- block_end = (i + 3 +
- (vpd_data[i + 1] +
- (vpd_data[i + 2] << 8)));
- i += 3;
+ if (block_end > TG3_NVM_VPD_LEN)
+ goto out_not_found;
- if (block_end > TG3_NVM_VPD_LEN)
- goto out_not_found;
+ i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (i < 0)
+ goto out_not_found;
- while (i < (block_end - 2)) {
- if (vpd_data[i + 0] == 'P' &&
- vpd_data[i + 1] == 'N') {
- int partno_len = vpd_data[i + 2];
+ len = pci_vpd_info_field_size(&vpd_data[i]);
- i += 3;
- if (partno_len > TG3_BPN_SIZE ||
- (partno_len + i) > TG3_NVM_VPD_LEN)
- goto out_not_found;
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len > TG3_BPN_SIZE ||
+ (len + i) > TG3_NVM_VPD_LEN)
+ goto out_not_found;
- memcpy(tp->board_part_number,
- &vpd_data[i], partno_len);
+ memcpy(tp->board_part_number, &vpd_data[i], len);
- /* Success. */
- return;
- }
- i += 3 + vpd_data[i + 2];
- }
-
- /* Part number not found. */
- goto out_not_found;
- }
+ return;
out_not_found:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -12532,8 +12629,24 @@ out_not_found:
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
strcpy(tp->board_part_number, "BCM57788");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+ strcpy(tp->board_part_number, "BCM57761");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
strcpy(tp->board_part_number, "BCM57765");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+ strcpy(tp->board_part_number, "BCM57781");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+ strcpy(tp->board_part_number, "BCM57785");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+ strcpy(tp->board_part_number, "BCM57791");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ strcpy(tp->board_part_number, "BCM57795");
else
strcpy(tp->board_part_number, "none");
}
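The rewritten part-number lookup in tg3_read_partno() leans on the generic PCI VPD helpers instead of hand-parsing the tag bytes. The same sequence, condensed into a sketch over a caller-provided buffer (names and sizes are placeholders):

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/string.h>

/* vpd/vpd_len: a raw VPD image already read from NVRAM or via pci_read_vpd(). */
static int example_find_partno(const u8 *vpd, unsigned int vpd_len,
			       char *out, unsigned int out_len)
{
	unsigned int rosize, block_end, len;
	int i;

	i = pci_vpd_find_tag(vpd, 0, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -ENOENT;			/* no read-only LRDT block */

	rosize = pci_vpd_lrdt_size(&vpd[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (block_end > vpd_len)
		return -EINVAL;			/* block overruns the image */

	i = pci_vpd_find_info_keyword(vpd, i, rosize, PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		return -ENOENT;			/* no "PN" keyword */

	len = pci_vpd_info_field_size(&vpd[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len >= out_len || i + len > vpd_len)
		return -EINVAL;

	memcpy(out, &vpd[i], len);
	out[len] = '\0';
	return 0;
}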
@@ -12636,6 +12749,12 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
case TG3_EEPROM_SB_REVISION_3:
offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
break;
+ case TG3_EEPROM_SB_REVISION_4:
+ offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+ break;
default:
return;
}
@@ -13096,6 +13215,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13103,8 +13224,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
+ pr_err("Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13284,7 +13404,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -13300,8 +13421,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
- pci_name(tp->pdev));
+ pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
return err;
}
@@ -13384,6 +13504,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (err)
return err;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+ return -ENOTSUPP;
+
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
@@ -13463,12 +13588,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
err = tg3_phy_probe(tp);
if (err) {
- printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
+ pr_err("(%s) phy probe failed, err %d\n",
pci_name(tp->pdev), err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
@@ -13978,7 +14105,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Write the buffer failed %d\n",
+ ret);
break;
}
@@ -13988,7 +14116,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
+ pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
+ val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -13997,7 +14126,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Read the buffer failed %d\n",
+ ret);
break;
}
@@ -14014,7 +14144,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
+ pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
+ p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14075,9 +14206,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14119,26 +14263,28 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
- switch (tp->phy_id & PHY_ID_MASK) {
- case PHY_ID_BCM5400: return "5400";
- case PHY_ID_BCM5401: return "5401";
- case PHY_ID_BCM5411: return "5411";
- case PHY_ID_BCM5701: return "5701";
- case PHY_ID_BCM5703: return "5703";
- case PHY_ID_BCM5704: return "5704";
- case PHY_ID_BCM5705: return "5705";
- case PHY_ID_BCM5750: return "5750";
- case PHY_ID_BCM5752: return "5752";
- case PHY_ID_BCM5714: return "5714";
- case PHY_ID_BCM5780: return "5780";
- case PHY_ID_BCM5755: return "5755";
- case PHY_ID_BCM5787: return "5787";
- case PHY_ID_BCM5784: return "5784";
- case PHY_ID_BCM5756: return "5722/5756";
- case PHY_ID_BCM5906: return "5906";
- case PHY_ID_BCM5761: return "5761";
- case PHY_ID_BCM5717: return "5717";
- case PHY_ID_BCM8002: return "8002/serdes";
+ switch (tp->phy_id & TG3_PHY_ID_MASK) {
+ case TG3_PHY_ID_BCM5400: return "5400";
+ case TG3_PHY_ID_BCM5401: return "5401";
+ case TG3_PHY_ID_BCM5411: return "5411";
+ case TG3_PHY_ID_BCM5701: return "5701";
+ case TG3_PHY_ID_BCM5703: return "5703";
+ case TG3_PHY_ID_BCM5704: return "5704";
+ case TG3_PHY_ID_BCM5705: return "5705";
+ case TG3_PHY_ID_BCM5750: return "5750";
+ case TG3_PHY_ID_BCM5752: return "5752";
+ case TG3_PHY_ID_BCM5714: return "5714";
+ case TG3_PHY_ID_BCM5780: return "5780";
+ case TG3_PHY_ID_BCM5755: return "5755";
+ case TG3_PHY_ID_BCM5787: return "5787";
+ case TG3_PHY_ID_BCM5784: return "5784";
+ case TG3_PHY_ID_BCM5756: return "5722/5756";
+ case TG3_PHY_ID_BCM5906: return "5906";
+ case TG3_PHY_ID_BCM5761: return "5761";
+ case TG3_PHY_ID_BCM5718C: return "5718C";
+ case TG3_PHY_ID_BCM5718S: return "5718S";
+ case TG3_PHY_ID_BCM57765: return "57765";
+ case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
}
@@ -14280,7 +14426,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int tg3_version_printed = 0;
struct net_device *dev;
struct tg3 *tp;
int i, err, pm_cap;
@@ -14288,20 +14433,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
- if (tg3_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s\n", version);
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ pr_err("Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14310,15 +14452,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
- "aborting.\n");
+ pr_err("Cannot find PowerManagement capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14368,8 +14509,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14385,8 +14525,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- printk(KERN_ERR PFX "Problem fetching invariants of chip, "
- "aborting.\n");
+ netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14421,8 +14560,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit "
- "DMA for consistent allocations\n");
+ netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14430,8 +14568,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14480,16 +14617,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
- "aborting.\n");
+ netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- printk(KERN_ERR PFX "Cannot map APE registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14513,7 +14648,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
+ netdev_err(dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14574,45 +14709,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ netdev_err(dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
- dev->name,
- tp->board_part_number,
- tp->pci_chip_rev_id,
- tg3_bus_string(tp, str),
- dev->dev_addr);
+ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_bus_string(tp, str),
+ dev->dev_addr);
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- printk(KERN_INFO
- "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
- tp->dev->name, phydev->drv->name,
- dev_name(&phydev->dev));
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
} else
- printk(KERN_INFO
- "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tp->dev->name, tg3_phy_string(tp),
- ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
- ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
- "10/100/1000Base-T")),
- (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
-
- printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
- dev->name,
- (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
- printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
- dev->name, tp->dma_rwctrl,
- (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
- (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
+ tg3_phy_string(tp),
+ ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+ ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+ "10/100/1000Base-T")),
+ (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+
+ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+ (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+ tp->dma_rwctrl,
+ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
return 0;
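
The tg3_init_one() hunks above replace open-coded printk(KERN_ERR PFX ...) calls with pr_err(), netdev_err() and netdev_info(). As a hedged sketch of that convention (a generic driver, not tg3's code): pr_*() picks up a pr_fmt() prefix defined before the includes, while the netdev_*() helpers prepend the parent device and interface name taken from the struct net_device, which is why the explicit dev->name arguments disappear in the hunks above.

    /* Sketch only -- illustrates the pr_err()/netdev_*() pattern. */
    #define pr_fmt(fmt) "mydrv: " fmt       /* hypothetical driver prefix */

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/netdevice.h>

    static int mydrv_map_regs(struct net_device *dev, void __iomem *regs)
    {
            if (!regs) {
                    /* printed as "mydrv: cannot map device registers, aborting" */
                    pr_err("cannot map device registers, aborting\n");
                    return -ENOMEM;
            }

            /* printed with the bus device and netdev name, e.g. "... eth0: ..." */
            netdev_info(dev, "registers mapped\n");
            return 0;
    }
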
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cd30889650f8..574a1cc4d353 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,6 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2010 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -55,7 +56,39 @@
#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
-/* 0x04 --> 0x64 unused */
+/* 0x04 --> 0x2c unused */
+#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009
+#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004
+#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007
+#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008
+#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL
+#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1
+#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
+#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
+#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
+#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
+#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
+/* 0x30 --> 0x64 unused */
#define TG3PCI_MSI_DATA 0x00000064
/* 0x66 --> 0x68 unused */
#define TG3PCI_MISC_HOST_CTRL 0x00000068
@@ -109,6 +142,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_57765_A0 0x57785000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1054,6 +1088,8 @@
#define CPMU_MUTEX_REQ_DRIVER 0x00001000
#define TG3_CPMU_MUTEX_GNT 0x00003660
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
/* 0x3664 --> 0x3800 unused */
/* Mbuf cluster free registers */
@@ -1203,14 +1239,18 @@
#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
#define BUFMGR_MB_HIGH_WATER 0x00004418
#define DEFAULT_MB_HIGH_WATER 0x00000060
#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
#define BUFMGR_MB_ALLOC_BIT 0x10000000
#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1250,6 +1290,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
#define RDMAC_STATUS 0x00004804
@@ -1540,6 +1581,8 @@
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1547,7 +1590,13 @@
#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+ GRC_MODE_PCIE_PL_SEL | \
+ GRC_MODE_PCIE_DL_SEL | \
+ GRC_MODE_PCIE_HI_1K_EN)
#define GRC_MISC_CFG 0x00006804
#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1801,6 +1850,11 @@
/* 0x7e74 --> 0x8000 unused */
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
#define TG3_OTP_AGCTGT_SHIFT 1
@@ -1842,6 +1896,8 @@
#define TG3_EEPROM_SB_REVISION_0 0x00000000
#define TG3_EEPROM_SB_REVISION_2 0x00020000
#define TG3_EEPROM_SB_REVISION_3 0x00030000
+#define TG3_EEPROM_SB_REVISION_4 0x00040000
+#define TG3_EEPROM_SB_REVISION_5 0x00050000
#define TG3_EEPROM_MAGIC_HW 0xabcd
#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
@@ -1859,6 +1915,8 @@
#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
@@ -1953,7 +2011,7 @@
#define NIC_SRAM_DATA_CFG_4 0x00000d60
#define NIC_SRAM_GMII_MODE 0x00000002
-#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004
+#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004
#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
@@ -2090,6 +2148,9 @@
/* Fast Ethernet Tranceiver definitions */
#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2679,6 +2740,7 @@ struct tg3 {
struct net_device *dev;
struct pci_dev *pdev;
+ u32 coal_now;
u32 msg_enable;
/* begin "tx thread" cacheline section */
@@ -2697,7 +2759,7 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
+ struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
/* begin "everything else" cacheline(s) section */
@@ -2795,7 +2857,7 @@ struct tg3 {
#define TG3_FLG3_USE_PHYLIB 0x00000010
#define TG3_FLG3_MDIOBUS_INITED 0x00000020
#define TG3_FLG3_PHY_CONNECTED 0x00000080
-#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
+#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
#define TG3_FLG3_CLKREQ_BUG 0x00000800
@@ -2809,6 +2871,7 @@ struct tg3 {
#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
+#define TG3_FLG3_L1PLLPD_EN 0x00800000
struct timer_list timer;
u16 timer_counter;
@@ -2858,42 +2921,50 @@ struct tg3 {
/* PHY info */
u32 phy_id;
-#define PHY_ID_MASK 0xfffffff0
-#define PHY_ID_BCM5400 0x60008040
-#define PHY_ID_BCM5401 0x60008050
-#define PHY_ID_BCM5411 0x60008070
-#define PHY_ID_BCM5701 0x60008110
-#define PHY_ID_BCM5703 0x60008160
-#define PHY_ID_BCM5704 0x60008190
-#define PHY_ID_BCM5705 0x600081a0
-#define PHY_ID_BCM5750 0x60008180
-#define PHY_ID_BCM5752 0x60008100
-#define PHY_ID_BCM5714 0x60008340
-#define PHY_ID_BCM5780 0x60008350
-#define PHY_ID_BCM5755 0xbc050cc0
-#define PHY_ID_BCM5787 0xbc050ce0
-#define PHY_ID_BCM5756 0xbc050ed0
-#define PHY_ID_BCM5784 0xbc050fa0
-#define PHY_ID_BCM5761 0xbc050fd0
-#define PHY_ID_BCM5717 0x5c0d8a00
-#define PHY_ID_BCM5906 0xdc00ac40
-#define PHY_ID_BCM8002 0x60010140
-#define PHY_ID_INVALID 0xffffffff
-#define PHY_ID_REV_MASK 0x0000000f
-#define PHY_REV_BCM5401_B0 0x1
-#define PHY_REV_BCM5401_B2 0x3
-#define PHY_REV_BCM5401_C0 0x6
-#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
-#define TG3_PHY_ID_BCM50610 0x143bd60
-#define TG3_PHY_ID_BCM50610M 0x143bd70
-#define TG3_PHY_ID_BCMAC131 0x143bc70
-#define TG3_PHY_ID_RTL8211C 0x001cc910
-#define TG3_PHY_ID_RTL8201E 0x00008200
-#define TG3_PHY_ID_BCM57780 0x03625d90
-#define TG3_PHY_OUI_MASK 0xfffffc00
-#define TG3_PHY_OUI_1 0x00206000
-#define TG3_PHY_OUI_2 0x0143bc00
-#define TG3_PHY_OUI_3 0x03625c00
+#define TG3_PHY_ID_MASK 0xfffffff0
+#define TG3_PHY_ID_BCM5400 0x60008040
+#define TG3_PHY_ID_BCM5401 0x60008050
+#define TG3_PHY_ID_BCM5411 0x60008070
+#define TG3_PHY_ID_BCM5701 0x60008110
+#define TG3_PHY_ID_BCM5703 0x60008160
+#define TG3_PHY_ID_BCM5704 0x60008190
+#define TG3_PHY_ID_BCM5705 0x600081a0
+#define TG3_PHY_ID_BCM5750 0x60008180
+#define TG3_PHY_ID_BCM5752 0x60008100
+#define TG3_PHY_ID_BCM5714 0x60008340
+#define TG3_PHY_ID_BCM5780 0x60008350
+#define TG3_PHY_ID_BCM5755 0xbc050cc0
+#define TG3_PHY_ID_BCM5787 0xbc050ce0
+#define TG3_PHY_ID_BCM5756 0xbc050ed0
+#define TG3_PHY_ID_BCM5784 0xbc050fa0
+#define TG3_PHY_ID_BCM5761 0xbc050fd0
+#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
+#define TG3_PHY_ID_BCM5718S 0xbc050ff0
+#define TG3_PHY_ID_BCM57765 0x5c0d8a40
+#define TG3_PHY_ID_BCM5906 0xdc00ac40
+#define TG3_PHY_ID_BCM8002 0x60010140
+#define TG3_PHY_ID_INVALID 0xffffffff
+
+#define PHY_ID_RTL8211C 0x001cc910
+#define PHY_ID_RTL8201E 0x00008200
+
+#define TG3_PHY_ID_REV_MASK 0x0000000f
+#define TG3_PHY_REV_BCM5401_B0 0x1
+
+ /* This macro assumes the passed PHY ID is
+ * already masked with TG3_PHY_ID_MASK.
+ */
+#define TG3_KNOWN_PHY_ID(X) \
+ ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
+ (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
+ (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
+ (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
+ (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
+ (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
+ (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
+ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
+ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
+ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002)
u32 led_ctrl;
u32 phy_otp;
@@ -2906,20 +2977,6 @@ struct tg3 {
u32 pci_clock_ctrl;
struct pci_dev *pdev_peer;
- /* This macro assumes the passed PHY ID is already masked
- * with PHY_ID_MASK.
- */
-#define KNOWN_PHY_ID(X) \
- ((X) == PHY_ID_BCM5400 || (X) == PHY_ID_BCM5401 || \
- (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
- (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
- (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
- (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
- (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
- (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
- (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
- (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
-
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
struct work_struct reset_task;
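
On the tg3.h hunk above: the PHY ID constants gain a TG3_ prefix so they no longer shadow the generic PHY-layer PHY_ID_* namespace (the Realtek IDs drop it), and TG3_KNOWN_PHY_ID() moves next to the values it tests. A minimal sketch of the mask-then-compare idiom those definitions support (hypothetical helper, not a quote from tg3.c):

    #include <linux/types.h>

    #define TG3_PHY_ID_MASK         0xfffffff0
    #define TG3_PHY_ID_BCM57765     0x5c0d8a40      /* value from the hunk above */

    /* The low four bits of the raw PHY ID carry the silicon revision,
     * so they are masked off before comparing against a known ID.
     */
    static bool tg3_phy_is_57765(u32 phy_id)
    {
            return (phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM57765;
    }
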
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb3155..390540c101c7 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
-static struct pci_device_id tlan_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
static int TLan_PhyDp83840aCheck( struct net_device * );
*/
-static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
+static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
static void TLan_MiiSendData( u16, u32, unsigned );
static void TLan_MiiSync( u16 );
static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1335,7 +1335,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
- for ( i = 0; i < dev->mc_count; i++ ) {
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
(char *) &dmi->dmi_addr );
@@ -1346,7 +1347,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
else
hash2 |= ( 1 << ( offset - 32 ) );
}
- dmi = dmi->next;
+ i++;
}
for ( ; i < 3; i++ )
TLan_SetMac( dev, i + 1, NULL );
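
The multicast-list hunk above is the first of many identical conversions in this patch: rather than walking dev->mc_list and dev->mc_count by hand, the loop body is handed to netdev_for_each_mc_addr() and any needed counter is kept locally. A stripped-down sketch of the pattern (hypothetical driver; struct dev_mc_list and dmi_addr as used throughout this patch):

    #include <linux/netdevice.h>
    #include <linux/string.h>

    /* Fold the multicast addresses into a 4-byte functional-address mask,
     * the way the TLAN and token-ring drivers in this patch do.
     */
    static void mydrv_build_functional_addr(struct net_device *dev, u8 fa[4])
    {
            struct dev_mc_list *dmi;

            memset(fa, 0, 4);
            netdev_for_each_mc_addr(dmi, dev) {
                    fa[0] |= dmi->dmi_addr[2];
                    fa[1] |= dmi->dmi_addr[3];
                    fa[2] |= dmi->dmi_addr[4];
                    fa[3] |= dmi->dmi_addr[5];
            }
    }
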
@@ -2204,7 +2205,7 @@ TLan_ResetAdapter( struct net_device *dev )
u32 data;
u8 data8;
- priv->tlanFullDuplex = FALSE;
+ priv->tlanFullDuplex = false;
priv->phyOnline=0;
netif_carrier_off(dev);
@@ -2259,7 +2260,7 @@ TLan_ResetAdapter( struct net_device *dev )
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
}
@@ -2651,14 +2652,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
} else if ( priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_HALF) {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
} else {
@@ -2695,7 +2696,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
tctl &= ~TLAN_TC_AUISEL;
if ( priv->duplex == TLAN_DUPLEX_FULL ) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( priv->speed == TLAN_SPEED_100 ) {
control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2751,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
mode = an_adv & an_lpa & 0x03E0;
if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2856,8 @@ void TLan_PhyMonitor( struct net_device *dev )
* TLan_MiiReadReg
*
* Returns:
- * 0 if ack received ok
- * 1 otherwise.
+ * false if ack received ok
+ * true if no ack received or other error
*
* Parms:
* dev The device structure containing
@@ -2875,17 +2876,17 @@ void TLan_PhyMonitor( struct net_device *dev )
*
**************************************************************/
-static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
{
u8 nack;
u16 sio, tmp;
u32 i;
- int err;
+ bool err;
int minten;
TLanPrivateInfo *priv = netdev_priv(dev);
unsigned long flags = 0;
- err = FALSE;
+ err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -2918,7 +2919,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
- err = TRUE;
+ err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e985..d13ff12d7500 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
*
****************************************************************/
-#define FALSE 0
-#define TRUE 1
-
#define TLAN_MIN_FRAME_SIZE 64
#define TLAN_MAX_FRAME_SIZE 1600
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d9629..7d7f3eef1ab3 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -63,6 +63,7 @@
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/firmware.h>
+#include <linux/slab.h>
#include <net/checksum.h>
@@ -117,7 +118,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
* will be stuck with 1555 lines of hex #'s in the code.
*/
-static struct pci_device_id xl_pci_tbl[] =
+static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
@@ -1390,10 +1391,9 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
u16 options ;
- int i ;
if (dev->flags & IFF_PROMISC)
options = 0x0004 ;
@@ -1408,7 +1408,7 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a3..515f122777ab 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
#define ABYSS_IO_EXTENT 64
-static struct pci_device_id abyss_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
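
The DEFINE_PCI_DEVICE_TABLE() conversions repeated across tlan, 3c359, abyss and the other PCI drivers in this patch are declarative only. To the best of my recollection of this kernel generation (paraphrased, not a verbatim quote of <linux/pci.h>), the macro expands to a const table placed in the init-only constant section, roughly:

    /* approximate expansion */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst

    /* so the abyss declaration above reads roughly as: */
    static const struct pci_device_id abyss_pci_tbl[] __devinitconst = {
            { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
              PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
            { }     /* Terminating entry */
    };
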
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 66272f2a0758..1a0967246e2f 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -995,13 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- mclist = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(mclist, dev) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
address[3] |= mclist->dmi_addr[5];
- mclist = mclist->next;
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d07..7a5fbf5a9d71 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -121,6 +121,7 @@
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
@@ -146,7 +147,7 @@
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.3 11/13/02 - Kent Yoder";
-static struct pci_device_id streamer_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
{} /* terminating entry */
};
@@ -1268,7 +1269,6 @@ static void streamer_set_rx_mode(struct net_device *dev)
__u8 options = 0;
struct dev_mc_list *dmi;
unsigned char dev_mc_address[5];
- int i;
writel(streamer_priv->srb, streamer_mmio + LAPA);
options = streamer_priv->streamer_copy_all_options;
@@ -1303,8 +1303,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
- {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 456f8bff40be..53f631ebb162 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -21,6 +21,7 @@ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
#include <linux/module.h>
#include <linux/mca.h>
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c5132..3a25e0434ae2 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
-static struct pci_device_id olympic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
@@ -1139,9 +1139,8 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
- int i ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
@@ -1178,7 +1177,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 5401d86a7be4..e40560137c46 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -36,7 +36,6 @@
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/errno.h>
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index e3c42f5ac4a9..8b508c922410 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -85,7 +85,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/errno.h>
@@ -693,7 +692,7 @@ static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
* NOTE: This function should be used whenever the status of any TPL must be
* modified by the driver, because the compiler may otherwise change the
* order of instructions such that writing the TPL status may be executed at
- * an undesireable time. When this function is used, the status is always
+ * an undesirable time. When this function is used, the status is always
* written when the function is called.
*/
static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
@@ -1212,10 +1211,9 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- int i;
- struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< dev->mc_count; i++)
- {
+ struct dev_mc_list *mclist;
+
+ netdev_for_each_mc_addr(mclist, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
@@ -1224,7 +1222,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
mclist->dmi_addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
mclist->dmi_addr[5];
- mclist = mclist->next;
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -2266,7 +2263,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
* This function should be used whenever the status of any RPL must be
* modified by the driver, because the compiler may otherwise change the
* order of instructions such that writing the RPL status may be executed
- * at an undesireable time. When this function is used, the status is
+ * at an undesirable time. When this function is used, the status is
* always written when the function is called.
*/
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdcae..d4c7c0c0a3d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
-static struct pci_device_id tmspci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a69c4a48bab9..5b1fbb3c3b51 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -38,7 +38,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crc32.h>
@@ -48,6 +47,7 @@
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
+#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -1184,29 +1184,19 @@ static void tsi108_set_rx_mode(struct net_device *dev)
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
- if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc = dev->mc_list;
+ struct dev_mc_list *mc;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- while (mc) {
+ netdev_for_each_mc_addr(mc, dev) {
u32 hash, crc;
- if (mc->dmi_addrlen == 6) {
- crc = ether_crc(6, mc->dmi_addr);
- hash = crc >> 23;
-
- __set_bit(hash, &data->mc_hash[0]);
- } else {
- printk(KERN_ERR
- "%s: got multicast address of length %d instead of 6.\n",
- dev->name,
- mc->dmi_addrlen);
- }
-
- mc = mc->next;
+ crc = ether_crc(6, mc->dmi_addr);
+ hash = crc >> 23;
+ __set_bit(hash, &data->mc_hash[0]);
}
TSI_WRITE(TSI108_EC_HASHADDR,
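
For the tsi108 hunk above: each multicast address is run through ether_crc() and the top nine bits of the CRC (crc >> 23) select one of 512 hash-table buckets. The standalone model below reproduces that bucket calculation in plain C so it can be compiled outside the kernel; the bit-serial CRC is my understanding of the convention ether_crc() follows (MSB-first register, polynomial 0x04C11DB7, all-ones seed), so treat it as a sketch rather than a reference.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-serial CRC-32 over the MAC address, feeding each byte LSB-first. */
    static uint32_t ether_crc_model(int len, const uint8_t *data)
    {
            uint32_t crc = 0xffffffff;

            while (len-- > 0) {
                    uint8_t byte = *data++;
                    int bit;

                    for (bit = 0; bit < 8; bit++, byte >>= 1) {
                            uint32_t fb = ((crc >> 31) ^ (byte & 1)) & 1;

                            crc <<= 1;
                            if (fb)
                                    crc ^= 0x04c11db7;
                    }
            }
            return crc;
    }

    int main(void)
    {
            /* 01:00:5e:00:00:01 -- the all-hosts IPv4 multicast MAC */
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t crc = ether_crc_model(6, mc);

            /* crc >> 23 keeps the top 9 bits, as in the tsi108 hunk */
            printf("crc=%08x bucket=%u\n", (unsigned)crc, (unsigned)(crc >> 23));
            return 0;
    }
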
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6ca..007d8e75666d 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 2)
- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
- dev->name, csr12, medianame[dev->if_port]);
+ dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
+ csr12, medianame[dev->if_port]);
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
if (tulip_check_duplex(dev) < 0) {
netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
} else if (tp->nwayset) {
/* Don't screw up a negotiated session! */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Using NWay-set %s media, csr12 %08x\n",
+ medianame[dev->if_port], csr12);
} else if (tp->medialock) {
;
} else if (dev->if_port == 3) {
if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
- "trying NWay.\n", dev->name, csr12);
+ dev_info(&dev->dev,
+ "No 21143 100baseTx link beat, %08x, trying NWay\n",
+ csr12);
t21142_start_nway(dev);
next_tick = 3*HZ;
}
} else if ((csr12 & 0x7000) != 0x5000) {
/* Negotiation failed. Search media types. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
- dev->name, csr12);
+ dev_info(&dev->dev,
+ "21143 negotiation failed, status %08x\n",
+ csr12);
if (!(csr12 & 4)) { /* 10mbps link beat good. */
new_csr6 = 0x82420000;
dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
iowrite32(1, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev, "Testing new 21143 media %s\n",
+ medianame[dev->if_port]);
if (new_csr6 != (tp->csr6 & ~0x00D5)) {
tp->csr6 &= 0x00D5;
tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
- dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
iowrite32(0x0001, ioaddr + CSR13);
udelay(100);
iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
- "%8.8x.\n", dev->name, csr12, csr5, csr14);
+ dev_info(&dev->dev,
+ "21143 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port], tp->sym_advertise,
- tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
else
- printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
- " link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
}
if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
#if 0 /* Restart shouldn't be needed. */
iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
+ dev->name, ioread32(ioaddr + CSR5));
#endif
tulip_start_rxtx(tp);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
- dev->name, tp->csr6, ioread32(ioaddr + CSR6),
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
} else if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
add_timer(&tp->timer);
} else if (dev->if_port == 3 || dev->if_port == 5) {
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "21143 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
del_timer_sync(&tp->timer);
t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
- printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10baseT link beat good\n");
} else if (!(csr12 & 4)) { /* 10mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10mbps sensed media\n");
dev->if_port = 0;
} else if (tp->nwayset) {
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
- dev->name, medianame[dev->if_port], tp->csr6);
+ dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
+ medianame[dev->if_port], tp->csr6);
} else { /* 100mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 100baseTx sensed media\n");
dev->if_port = 3;
tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
iowrite32(0x0003FF7F, ioaddr + CSR14);
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 1cc8cf4425d1..516713fa0a05 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
If in doubt, say Y.
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on PCI || EISA
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb75..19cafc2b418d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -42,6 +42,7 @@
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -337,7 +338,7 @@ static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
-static struct pci_device_id de_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +383,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (netif_msg_rx_err(de))
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- de->dev->name, status);
+ dev_warn(&de->dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
@@ -487,7 +488,7 @@ rx_next:
}
if (!rx_work)
- printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+ dev_warn(&de->dev->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
@@ -504,7 +505,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
if (netif_msg_intr(de))
printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
- dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
+ dev->name, status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
@@ -529,8 +531,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ dev_err(&de->dev->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
}
return IRQ_HANDLED;
@@ -582,7 +585,8 @@ static void de_tx (struct de_private *de)
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
if (netif_msg_tx_done(de))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ de->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -674,18 +678,17 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
+ }
- for (i = 0; i < 32; i++) {
- *setup_frm++ = hash_table[i];
- *setup_frm++ = hash_table[i];
- }
- setup_frm = &de->setup_frame[13*6];
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
}
+ setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
eaddrs = (u16 *)dev->dev_addr;
@@ -698,20 +701,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
@@ -738,7 +739,7 @@ static void __de_set_rx_mode (struct net_device *dev)
goto out;
}
- if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
macmode |= AcceptAllMulticast;
goto out;
@@ -746,7 +747,7 @@ static void __de_set_rx_mode (struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
@@ -870,7 +871,7 @@ static void de_stop_rxtx (struct de_private *de)
udelay(100);
}
- printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
+ dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +906,8 @@ static void de_link_up(struct de_private *de)
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link up, media %s\n",
- de->dev->name, media_name[de->media_type]);
+ dev_info(&de->dev->dev, "link up, media %s\n",
+ media_name[de->media_type]);
}
}
@@ -915,7 +916,7 @@ static void de_link_down(struct de_private *de)
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link down\n", de->dev->name);
+ dev_info(&de->dev->dev, "link down\n");
}
}
@@ -925,7 +926,8 @@ static void de_set_media (struct de_private *de)
u32 macmode = dr32(MacMode);
if (de_is_running(de))
- printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);
+ dev_warn(&de->dev->dev,
+ "chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +947,15 @@ static void de_set_media (struct de_private *de)
macmode &= ~FullDuplex;
if (netif_msg_link(de)) {
- printk(KERN_INFO
- "%s: set link %s\n"
- "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
- "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
- de->dev->name, media_name[media],
- de->dev->name, dr32(MacMode), dr32(SIAStatus),
- dr32(CSR13), dr32(CSR14), dr32(CSR15),
- de->dev->name, macmode, de->media[media].csr13,
- de->media[media].csr14, de->media[media].csr15);
+ dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
+ dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+
+ dev_info(&de->dev->dev,
+ "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
}
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
@@ -992,9 +994,8 @@ static void de21040_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, status %x\n",
- dev->name, media_name[de->media_type],
- status);
+ dev_info(&dev->dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
return;
}
@@ -1022,8 +1023,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1080,10 @@ static void de21041_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
- dev->name, media_name[de->media_type],
- dr32(MacMode), status);
+ dev_info(&dev->dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
return;
}
@@ -1150,8 +1152,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1380,7 @@ static int de_open (struct net_device *dev)
rc = de_alloc_rings(de);
if (rc) {
- printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
@@ -1387,15 +1388,14 @@ static int de_open (struct net_device *dev)
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
- dev->name, dev->irq, rc);
+ dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
@@ -1666,8 +1666,8 @@ static int de_nway_reset(struct net_device *dev)
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
- de->dev->name, status, dr32(SIAStatus));
+ dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
return 0;
}
@@ -1711,7 +1711,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
- printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
+ pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
}
}
@@ -1830,9 +1830,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
}
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
- de->board_idx, ofs,
- media_name[de->media_type]);
+ pr_info("de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs, media_name[de->media_type]);
/* init SIA register values to defaults */
for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1878,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
de->media[idx].type = idx;
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: media block #%u: %s",
- de->board_idx, i,
- media_name[de->media[idx].type]);
+ pr_info("de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
bufp += sizeof (ib->opts);
@@ -1893,13 +1892,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
sizeof(ib->csr15);
if (netif_msg_probe(de))
- printk(" (%x,%x,%x)\n",
- de->media[idx].csr13,
- de->media[idx].csr14,
- de->media[idx].csr15);
+ pr_cont(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
} else if (netif_msg_probe(de))
- printk("\n");
+ pr_cont("\n");
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
@@ -2005,7 +2004,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* check for invalid IRQ value */
if (pdev->irq < 2) {
rc = -EIO;
- printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+ pr_err(PFX "invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
@@ -2016,14 +2015,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+ pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pci_name(pdev));
goto err_out_res;
}
@@ -2031,9 +2030,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1),
- pciaddr, pci_name(pdev));
+ pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2043,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* make sure hardware is not running */
rc = de_reset_mac(de);
if (rc) {
- printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
@@ -2065,12 +2063,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
goto err_out_iomap;
/* print info about board and interface just registered */
- printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- de->de21040 ? "21040" : "21041",
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
pci_set_drvdata(pdev, dev);
@@ -2158,8 +2155,7 @@ static int de_resume (struct pci_dev *pdev)
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
de_init_hw(de);
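
A minimal sketch of the logging conversion the de2104x hunks above apply: the old calls spell out the log level and dev->name by hand, while dev_info()/dev_err() derive a consistent prefix from the struct device embedded in the net_device. The helper name de_report_link() below is hypothetical and not part of the patch.

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void de_report_link(struct net_device *dev, u32 status)
{
        /* old style: level and interface name passed explicitly */
        printk(KERN_INFO "%s: link status %x\n", dev->name, status);

        /* new style: dev_info() prefixes the message from the device itself */
        dev_info(&dev->dev, "link status %x\n", status);
}
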
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index a8349b7200b5..09b57193a16a 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -450,7 +450,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/eisa.h>
#include <linux/delay.h>
@@ -467,6 +466,7 @@
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -1951,9 +1951,9 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
u_long iobase = dev->base_addr;
- int i, j, bit, byte;
+ int i, bit, byte;
u16 hashcode;
u32 omr, crc;
char *pa;
@@ -1963,12 +1963,11 @@ SetMulticastFilter(struct net_device *dev)
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1984,9 +1983,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- for (j=0; j<dev->mc_count; j++) {
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
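
The de4x5 hunks above replace the hand-rolled dev->mc_list/dmi->next walk with the netdev_for_each_mc_addr() iterator. A minimal sketch of the new idiom, assuming a CRC-based hash filter; the function name and table layout below are illustrative only, not taken from the driver:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_build_mc_hash(struct net_device *dev, u32 hash[2])
{
        struct dev_mc_list *dmi;

        /* visit every multicast address configured on the interface */
        netdev_for_each_mc_addr(dmi, dev) {
                u32 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
                u32 bit = crc & 0x3f;   /* 64-bin hash, as in dm9132_id_table() */

                hash[bit >> 5] |= 1 << (bit & 31);
        }
}

Where only the number of entries matters, netdev_mc_count(dev) stands in for the old dev->mc_count, as in the padding loop of send_filter_frame() further down.
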
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index ad63621913c3..9568156dea98 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
Test and make sure PCI latency is now correct for all cases.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "dmfe"
#define DRV_VERSION "1.36.4"
#define DRV_RELDATE "2002-01-17"
@@ -72,7 +74,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -92,6 +93,10 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -145,16 +150,17 @@
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
-#define DMFE_DBUG(dbug_now, msg, value) \
- do { \
- if (dmfe_debug || (dbug_now)) \
- printk(KERN_ERR DRV_NAME ": %s %lx\n",\
- (msg), (long) (value)); \
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ pr_err("%s %lx\n", \
+ (msg), (long) (value)); \
} while (0)
-#define SHOW_MEDIA_TYPE(mode) \
- printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
- (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_info("Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", \
+ (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
@@ -323,8 +329,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
-static void send_filter_frame(struct DEVICE * ,int);
-static void dm9132_id_table(struct DEVICE * ,int);
+static void send_filter_frame(struct DEVICE *);
+static void dm9132_id_table(struct DEVICE *);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
@@ -377,6 +383,22 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (!printed_version++)
printk(version);
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ pr_info("skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -384,8 +406,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME
- ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -396,13 +417,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -417,7 +438,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
#endif
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -476,12 +497,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (err)
goto err_out_free_buf;
- printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n",
- dev->name,
- ent->driver_data >> 16,
- pci_name(pdev),
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16,
+ pci_name(pdev), dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -639,9 +657,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -675,7 +693,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -685,8 +703,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
- db->tx_queue_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
return NETDEV_TX_BUSY;
}
@@ -758,12 +775,11 @@ static int dmfe_stop(struct DEVICE *dev)
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
- " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
- db->tx_fifo_underrun, db->tx_excessive_collision,
- db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
- db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
- db->reset_fatal, db->reset_TXtimeout);
+ printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
#endif
return 0;
@@ -864,7 +880,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
@@ -874,7 +890,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -971,7 +987,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+ pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -1035,6 +1051,7 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1047,19 +1064,19 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
- DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1170,8 +1187,7 @@ static void dmfe_timer(unsigned long data)
if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
- printk(KERN_WARNING "%s: Tx timeout - resetting\n",
- dev->name);
+ dev_warn(&dev->dev, "Tx timeout - resetting\n");
}
}
@@ -1435,7 +1451,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
* This setup frame initializes DM910X address filter mode
*/
-static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
+static void dm9132_id_table(struct DEVICE *dev)
{
struct dev_mc_list *mcptr;
u16 * addrptr;
@@ -1455,15 +1471,14 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
ioaddr += 4;
/* Clear Hash Table */
- for (i = 0; i < 4; i++)
- hash_table[i] = 0x0;
+ memset(hash_table, 0, sizeof(hash_table));
/* broadcast address */
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
- hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(mcptr, dev) {
+ hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1478,7 +1493,7 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
* This setup frame initializes DM910X address filter mode
*/
-static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
+static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
struct dev_mc_list *mcptr;
@@ -1504,14 +1519,14 @@ static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
*suptr++ = 0xffff;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff;
*suptr++ = 0xffff;
*suptr++ = 0xffff;
@@ -1625,7 +1640,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+ pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2068,7 +2083,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
-static struct pci_device_id dmfe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
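
The dmfe diff above defines pr_fmt() so that the pr_err()/pr_info() calls it introduces carry the driver prefix automatically instead of repeating DRV_NAME in every format string. A minimal sketch of the convention, with a made-up message; pr_fmt() has to be defined before the first include that pulls in <linux/kernel.h>:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_report(int err)
{
        if (err)
                pr_err("setup failed, err=%d\n", err);  /* logs "dmfe: setup failed, ..." when built as dmfe */
        else
                pr_info("setup complete\n");
}
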
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89b..6002e651b9ea 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -13,6 +13,7 @@
*/
#include <linux/pci.h>
+#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
#include <asm/unaligned.h>
@@ -143,6 +144,12 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
void __devinit tulip_parse_eeprom(struct net_device *dev)
{
+ /*
+ dev is not registered at this point, so logging messages can't
+ use dev_<level> or netdev_<level> but dev->name is good via a
+ hack in the caller
+ */
+
/* The last media info list parsed, for multiport boards. */
static struct mediatable *last_mediatable;
static unsigned char *last_ee_data;
@@ -161,15 +168,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
if (ee_data[0] == 0xff) {
if (last_mediatable) {
controller_index++;
- printk(KERN_INFO "%s: Controller %d of multiport board.\n",
- dev->name, controller_index);
+ pr_info("%s: Controller %d of multiport board\n",
+ dev->name, controller_index);
tp->mtable = last_mediatable;
ee_data = last_ee_data;
goto subsequent_board;
} else
- printk(KERN_INFO "%s: Missing EEPROM, this interface may "
- "not work correctly!\n",
- dev->name);
+ pr_info("%s: Missing EEPROM, this interface may not work correctly!\n",
+ dev->name);
return;
}
/* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +187,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
i++; /* An Accton EN1207, not an outlaw Maxtech. */
memcpy(ee_data + 26, eeprom_fixups[i].newtable,
sizeof(eeprom_fixups[i].newtable));
- printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
- " substitute media control info.\n",
- dev->name, eeprom_fixups[i].name);
+ pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n",
+ dev->name, eeprom_fixups[i].name);
break;
}
}
if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
- printk(KERN_INFO "%s: Old style EEPROM with no media selection "
- "information.\n",
- dev->name);
+ pr_info("%s: Old style EEPROM with no media selection information\n",
+ dev->name);
return;
}
}
@@ -218,7 +222,8 @@ subsequent_board:
/* there is no phy information, don't even try to build mtable */
if (count == 0) {
if (tulip_debug > 0)
- printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ pr_warning("%s: no phy info, aborting mtable build\n",
+ dev->name);
return;
}
@@ -234,8 +239,10 @@ subsequent_board:
mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
mtable->csr15dir = mtable->csr15val = 0;
- printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
- media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ pr_info("%s: EEPROM default media type %s\n",
+ dev->name,
+ media & 0x0800 ? "Autosense"
+ : medianame[media & MEDIA_MASK]);
for (i = 0; i < count; i++) {
struct medialeaf *leaf = &mtable->mleaf[i];
@@ -298,16 +305,17 @@ subsequent_board:
}
if (tulip_debug > 1 && leaf->media == 11) {
unsigned char *bp = leaf->leafdata;
- printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
- "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
- dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
- bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
+ dev->name,
+ bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2],
+ bp[4 + bp[2 + bp[1]*2]*2]);
}
- printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
- "by a %s (%d) block.\n",
- dev->name, i, medianame[leaf->media & 15], leaf->media,
- leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
- leaf->type);
+ pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n",
+ dev->name,
+ i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
}
if (new_advertise)
tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c7..1faf7a4d7202 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
#endif
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
- printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+ printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
break;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
dev->name, entry, status);
if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (unsigned long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
int received = 0;
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
- dev->name, entry, status);
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
+ dev->name, entry, status);
if (--rx_work_limit < 0)
break;
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, status);
#endif
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev_err(&dev->dev,
+ "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tp->dirty_tx = dirty_tx;
if (csr5 & TxDied) {
if (tulip_debug > 2)
- printk(KERN_WARNING "%s: The transmitter stopped."
- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
- dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
+ dev_warn(&dev->dev,
+ "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
+ csr5, ioread32(ioaddr + CSR6),
+ tp->csr6);
tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
* to the 21142/3 docs that is).
* -- rmk
*/
- printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
- dev->name, tp->nir, error);
+ dev_err(&dev->dev,
+ "(%lu) System Error occurred (%d)\n",
+ tp->nir, error);
}
/* Clear all error sources, included undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & TimerInt) {
if (tulip_debug > 2)
- printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
- dev->name, csr5);
+ dev_err(&dev->dev,
+ "Re-enabling interrupts, %08x\n",
+ csr5);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
tp->ttimer = 0;
oi++;
}
if (tx > maxtx || rx > maxrx || oi > maxoi) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Too much work during an interrupt, "
- "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+ dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
+ csr5, tp->nir, tx, rx, oi);
/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
+ tp->nir, tp->cur_rx, tp->ttimer, rx);
if (tp->chip_id == LC82C168) {
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));
} else {
if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) set timer\n",
+ tp->nir);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
ioaddr + CSR7);
iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
+ dev->name, ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}
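
Many of the interrupt.c hunks above simply rewrite the format strings: for a 32-bit register value, "%8.8x" (field width 8, precision 8) and "%08x" (width 8, zero-padded) print the same eight hex digits, so that part of the change is cosmetic. A quick stand-alone check, for illustration only:

#include <stdio.h>

int main(void)
{
        unsigned int csr5 = 0x1a2b;

        printf("%8.8x\n", csr5);        /* prints 00001a2b */
        printf("%08x\n", csr5);         /* prints 00001a2b */
        return 0;
}
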
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705bf..68b170ae4d15 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
switch (mleaf->type) {
case 0: /* 21140 non-MII xcvr. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
- " with control setting %2.2x.\n",
- dev->name, p[1]);
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
+ dev->name, p[1]);
dev->if_port = p[0];
if (startup)
iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
- "%4.4x/%4.4x.\n",
- dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
+ dev->name, medianame[dev->if_port],
+ setup[0], setup[1]);
if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
csr13val = setup[0];
csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
if (startup) iowrite32(csr13val, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
- dev->name, csr15dir, csr15val);
+ printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
+ dev->name, csr15dir, csr15val);
if (mleaf->type == 4)
new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
if (tp->mii_advertise == 0)
tp->mii_advertise = tp->advertising[phy_num];
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
- dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
+ dev->name, tp->mii_advertise,
+ tp->phys[phy_num]);
tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
}
break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
default:
- printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
- dev->name, mleaf->type);
+ printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
+ dev->name, mleaf->type);
new_csr6 = 0x020E0000;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
- dev->name, medianame[dev->if_port],
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
+ dev->name, medianame[dev->if_port],
ioread32(ioaddr + CSR12) & 0xff);
} else if (tp->chip_id == LC82C168) {
if (startup && ! tp->medialock)
dev->if_port = tp->mii_cnt ? 11 : 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
- dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
+ dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
if (tp->mii_cnt) {
new_csr6 = 0x810C0000;
iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
} else
new_csr6 = 0x03860000;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No media description table, assuming "
- "%s transceiver, CSR12 %2.2x.\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
}
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
- "%4.4x.\n", dev->name, bmsr, lpa);
+ dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
+ bmsr, lpa);
if (bmsr == 0xffff)
return -2;
if ((bmsr & BMSR_LSTATUS) == 0) {
int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
if ((new_bmsr & BMSR_LSTATUS) == 0) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: No link beat on the MII interface,"
- " status %4.4x.\n", dev->name, new_bmsr);
+ dev_info(&dev->dev,
+ "No link beat on the MII interface, status %04x\n",
+ new_bmsr);
return -1;
}
}
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
tulip_restart_rxtx(tp);
if (tulip_debug > 0)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII"
- "#%d link partner capability of %4.4x.\n",
- dev->name, tp->full_duplex ? "full" : "half",
- tp->phys[0], lpa);
+ dev_info(&dev->dev,
+ "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
return 1;
}
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
tp->phys[phy_idx++] = phy;
- printk (KERN_INFO "tulip%d: MII transceiver #%d "
- "config %4.4x status %4.4x advertising %4.4x.\n",
+ pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
board_idx, phy, mii_reg0, mii_status, mii_advert);
/* Fixup for DLink with miswired PHY. */
if (mii_advert != to_advert) {
- printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
- " previously advertising %4.4x.\n",
- board_idx, to_advert, phy, mii_advert);
+ printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
tulip_mdio_write (dev, phy, 4, to_advert);
}
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
}
tp->mii_cnt = phy_idx;
if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
- printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
board_idx);
tp->phys[0] = 1;
}
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09dfc..966efa1a27d7 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
new_csr6 |= 0x00000200;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
- dev->name, phy_reg, medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
int phy_reg = ioread32(ioaddr + 0xB8);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
- dev->name, phy_reg, csr5);
+ printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
+ dev->name, phy_reg, csr5);
if (ioread32(ioaddr + CSR5) & TPLnkFail) {
iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
/* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
int csr5 = ioread32(ioaddr + CSR5);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
- "CSR5 %8.8x.\n",
- dev->name, phy_reg, medianame[dev->if_port], csr5);
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
if (phy_reg & 0x04000000) { /* Remote link fault */
iowrite32(0x0201F078, ioaddr + 0xB8);
next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
next_tick = 60*HZ;
} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
- "CSR5 %8.8x, PHY %3.3x.\n",
- dev->name, medianame[dev->if_port], csr12,
- ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8));
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ dev->name, medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
next_tick = 3*HZ;
if (tp->medialock) {
} else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Changing PNIC configuration to %s "
- "%s-duplex, CSR6 %8.8x.\n",
- dev->name, medianame[dev->if_port],
- tp->full_duplex ? "full" : "half", new_csr6);
+ dev_info(&dev->dev,
+ "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
+ medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half",
+ new_csr6);
}
}
}
@@ -162,7 +163,7 @@ too_good_connection:
mod_timer(&tp->timer, RUN_AT(next_tick));
if(!ioread32(ioaddr + CSR7)) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ dev_info(&dev->dev, "sw timer wakeup\n");
disable_irq(dev->irq);
tulip_refill_rx(dev);
enable_irq(dev->irq);
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf46..b8197666021e 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3)
- printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
- dev->name,ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
csr14 |= 0x00001184;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
- "csr14=%8.8x.\n", dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: On Entry to Nway, "
- "csr6=%8.8x.\n", dev->name, tp->csr6);
+ printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
+ dev->name, tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
int csr12 = ioread32(ioaddr + CSR12);
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
- " CSR5 %x, %8.8x.\n", dev->name, csr12,
- csr5, ioread32(ioaddr + CSR14));
+ dev_info(&dev->dev,
+ "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, ioread32(ioaddr + CSR14));
/* If NWay finished and we have a negotiated partner capability.
* check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
else if (negotiated & 0x0020) dev->if_port = 0;
else {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: funny autonegotiate result "
- "csr12 %8.8x advertising %4.4x\n",
- dev->name, csr12, tp->sym_advertise);
+ dev_info(&dev->dev,
+ "funny autonegotiate result csr12 %08x advertising %04x\n",
+ csr12, tp->sym_advertise);
tp->nwayset = 0;
/* so check if 100baseTx link state is okay */
if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port],
- tp->sym_advertise, tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
}
/* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
- "%8.8x.\n", dev->name, tp->csr6,
- ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6,
+ ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
return;
} else {
- printk(KERN_INFO "%s: Autonegotiation failed, "
- "using %s, link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
/* remember to turn off bit 7 - autonegotiate
* enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 100mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
/* check 100 link beat */
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 10mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 4) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+ dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
/* if all else fails default to trying 10baseT-HD */
dev->if_port = 0;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e084223082..36c2725ec886 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
unsigned long flags;
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
- " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
- dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13),
- ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
}
switch (tp->chip_id) {
case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
Assume this is a generic MII or SYM transceiver. */
next_tick = 60*HZ;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
- "CSR12 0x%2.2x.\n",
- dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff);
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
+ dev->name,
+ ioread32(ioaddr + CSR6), csr12 & 0xff);
break;
}
mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
s8 bitnum = p[offset];
if (p[offset+1] & 0x80) {
if (tulip_debug > 1)
- printk(KERN_DEBUG"%s: Transceiver monitor tick "
- "CSR12=%#2.2x, no media sense.\n",
- dev->name, csr12);
+ printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ dev->name, csr12);
if (mleaf->type == 4) {
if (mleaf->media == 3 && (csr12 & 0x02))
goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
break;
}
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
- " bit %d is %d, expecting %d.\n",
- dev->name, csr12, (bitnum >> 1) & 7,
- (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
- (bitnum >= 0));
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
/* Check that the specified bit has the proper value. */
if ((bitnum < 0) !=
((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ printk(KERN_DEBUG "%s: Link beat detected for %s\n",
+ dev->name,
medianame[mleaf->media & MEDIA_MASK]);
if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
if (tulip_media_cap[dev->if_port] & MediaIsFD)
goto select_next_media; /* Skip FD entries. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No link beat on media %s,"
- " trying transceiver type %s.\n",
- dev->name, medianame[mleaf->media & MEDIA_MASK],
+ printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
+ dev->name,
+ medianame[mleaf->media & MEDIA_MASK],
medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3) {
- printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
- ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "MXIC negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
}
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
- "%4.4x.\n",
- dev->name,
- tulip_mdio_read(dev, tp->phys[0], 1),
- tulip_mdio_read(dev, tp->phys[0], 5));
+ printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
+ dev->name,
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0fa3140d65bf..3810db9dc2de 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
#include <linux/etherdevice.h>
@@ -41,7 +42,6 @@
static char version[] __devinitdata =
"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = {
| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
+#ifdef CONFIG_TULIP_DM910X
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
@@ -207,7 +211,7 @@ struct tulip_chip_table tulip_tbl[] = {
};
-static struct pci_device_id tulip_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
@@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -243,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+ { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ } /* terminate list */
};
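
Both this hunk and the matching dmfe change declare the ID table through DEFINE_PCI_DEVICE_TABLE() rather than a bare struct pci_device_id array. A minimal sketch of the idiom, with placeholder vendor/device IDs not taken from the patch:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder IDs */
        { }                             /* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
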
@@ -319,7 +326,8 @@ static void tulip_up(struct net_device *dev)
udelay(100);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
+ dev->name, dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -380,8 +388,9 @@ static void tulip_up(struct net_device *dev)
(dev->if_port == 12 ? 0 : dev->if_port);
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using user-specified media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev,
+ "Using user-specified media %s\n",
+ medianame[dev->if_port]);
goto media_picked;
}
}
@@ -389,8 +398,9 @@ static void tulip_up(struct net_device *dev)
int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
- dev->name, medianame[looking_for]);
+ dev_info(&dev->dev,
+ "Using EEPROM-set media %s\n",
+ medianame[looking_for]);
goto media_picked;
}
}
@@ -417,9 +427,10 @@ media_picked:
if (tp->mii_cnt) {
tulip_select_media(dev, 1);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Using MII transceiver %d, status "
- "%4.4x.\n",
- dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ dev_info(&dev->dev,
+ "Using MII transceiver %d, status %04x\n",
+ tp->phys[0],
+ tulip_mdio_read(dev, tp->phys[0], 1));
iowrite32(csr6_mask_defstate, ioaddr + CSR6);
tp->csr6 = csr6_mask_hdcap;
dev->if_port = 11;
@@ -483,9 +494,10 @@ media_picked:
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ dev->name, ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -533,27 +545,30 @@ static void tulip_tx_timeout(struct net_device *dev)
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
/* Do nothing -- the media monitor should handle this. */
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
- dev->name);
+ dev_warn(&dev->dev,
+ "Transmit timeout using MII device\n");
} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
tp->chip_id == DM910X) {
- printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
- "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
- ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ dev_warn(&dev->dev,
+ "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
+ ioread32(ioaddr + CSR15));
tp->timeout_recovery = 1;
schedule_work(&tp->media_work);
goto out_unlock;
} else if (tp->chip_id == PNIC2) {
- printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
- "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
- dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
- (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
+ (int)ioread32(ioaddr + CSR5),
+ (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7),
+ (int)ioread32(ioaddr + CSR12));
} else {
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
- "%8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
dev->if_port = 0;
}
@@ -563,26 +578,26 @@ static void tulip_tx_timeout(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
int j;
- printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
- "%2.2x %2.2x %2.2x.\n",
- i, (unsigned int)tp->rx_ring[i].status,
- (unsigned int)tp->rx_ring[i].length,
- (unsigned int)tp->rx_ring[i].buffer1,
- (unsigned int)tp->rx_ring[i].buffer2,
- buf[0], buf[1], buf[2]);
+ printk(KERN_DEBUG
+ "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
+ i,
+ (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
for (j = 0; buf[j] != 0xee && j < 1600; j++)
if (j < 100)
- printk(KERN_CONT " %2.2x", buf[j]);
- printk(KERN_CONT " j=%d.\n", j);
+ pr_cont(" %02x", buf[j]);
+ pr_cont(" j=%d\n", j);
}
- printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- (unsigned int)tp->rx_ring[i].status);
- printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
+ pr_cont("\n");
}
#endif
@@ -825,8 +840,9 @@ static int tulip_close (struct net_device *dev)
tulip_down (dev);
if (tulip_debug > 1)
- printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, ioread32 (ioaddr + CSR5));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Shutting down ethercard, status was %02x\n",
+ ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
@@ -982,12 +998,10 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
-
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
@@ -1006,20 +1020,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
@@ -1042,7 +1054,8 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
@@ -1050,15 +1063,14 @@ static void set_rx_mode(struct net_device *dev)
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
- int i;
- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
@@ -1066,10 +1078,10 @@ static void set_rx_mode(struct net_device *dev)
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
- printk(KERN_INFO "%s: Added filter for %pM"
- " %8.8x bit %d.\n",
- dev->name, mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ dev_info(&dev->dev,
+ "Added filter for %pM %08x bit %d\n",
+ mclist->dmi_addr,
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
@@ -1092,7 +1104,8 @@ static void set_rx_mode(struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
@@ -1281,9 +1294,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
unsigned int force_csr0 = 0;
#ifndef MODULE
- static int did_version; /* Already printed version info. */
- if (tulip_debug > 0 && did_version++ == 0)
- printk (KERN_INFO "%s", version);
+ if (tulip_debug > 0)
+ printk_once(KERN_INFO "%s", version);
#endif
board_idx++;
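printk_once() carries its own one-shot guard, which is what lets the static did_version counter above go away. A hedged sketch with a hypothetical banner message:

	#include <linux/kernel.h>

	static void example_banner(void)
	{
		/* Prints on the first call only; later calls are no-ops. */
		printk_once(KERN_INFO "example: driver banner\n");
	}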
@@ -1294,23 +1306,33 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
- printk (KERN_ERR PFX "skipping LMC card.\n");
+ pr_err(PFX "skipping LMC card\n");
return -ENODEV;
}
/*
- * Early DM9100's need software CRC and the DMFE driver
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
*/
- if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
- {
- /* Read Chip revision */
- if (pdev->revision < 0x30)
- {
- printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
+#endif
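The new CONFIG_TULIP_DM910X block keys off the OpenFirmware device node: on SPARC, on-board DM910x chips are described in the firmware tree with a local-mac-address property, while plug-in cards are not and stay with dmfe. A standalone sketch of the same test, with a hypothetical helper name and assuming the platform's OF headers are available:

	#include <linux/pci.h>

	/* pci_device_to_OF_node()/of_get_property() come from the
	 * platform OF support (SPARC/powerpc). */
	static bool example_is_onboard_dm910x(struct pci_dev *pdev)
	{
		struct device_node *dp = pci_device_to_OF_node(pdev);

		/* Present only for on-board chips described by firmware. */
		return dp && of_get_property(dp, "local-mac-address", NULL) != NULL;
	}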
/*
* Looks for early PCI chipsets where people report hangs
@@ -1353,9 +1375,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
i = pci_enable_device(pdev);
if (i) {
- printk (KERN_ERR PFX
- "Cannot enable tulip board #%d, aborting\n",
- board_idx);
+ pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
return i;
}
@@ -1364,22 +1385,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev) {
- printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ pr_err(PFX "ether device alloc failed, aborting\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
- "aborting\n", pci_name(pdev),
- (unsigned long long)pci_resource_len (pdev, 0),
- (unsigned long long)pci_resource_start (pdev, 0));
+ pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pci_name(pdev),
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
goto err_out_free_netdev;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, "tulip"))
+ if (pci_request_regions (pdev, DRV_NAME))
goto err_out_free_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1592,8 +1613,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (dev->mem_start & MEDIA_MASK)
tp->default_port = dev->mem_start & MEDIA_MASK;
if (tp->default_port) {
- printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
- board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
tp->medialock = 1;
if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
tp->full_duplex = 1;
@@ -1608,7 +1629,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
if (tp->flags & HAS_MEDIA_TABLE) {
- sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
tulip_parse_eeprom(dev);
strcpy(dev->name, "eth%d"); /* un-hack */
}
@@ -1644,20 +1665,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (register_netdev(dev))
goto err_out_free_ring;
- printk(KERN_INFO "%s: %s rev %d at "
+ pci_set_drvdata(pdev, dev);
+
+ dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
- "MMIO"
+ "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
- "Port"
+ "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
- " %#llx,", dev->name, chip_name, pdev->revision,
- (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
- pci_set_drvdata(pdev, dev);
-
- if (eeprom_missing)
- printk(" EEPROM not present,");
- printk(" %pM", dev->dev_addr);
- printk(", IRQ %d.\n", irq);
+ chip_name, pdev->revision,
+ (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
+ eeprom_missing ? " EEPROM not present," : "",
+ dev->dev_addr, irq);
if (tp->chip_id == PNIC2)
tp->link_change = pnic2_lnk_change;
@@ -1780,12 +1799,12 @@ static int tulip_resume(struct pci_dev *pdev)
return 0;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+ pr_err(PFX "pci_enable_device failed in resume\n");
return retval;
}
if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk (KERN_ERR "tulip: request_irq failed in resume\n");
+ pr_err(PFX "request_irq failed in resume\n");
return retval;
}
@@ -1855,7 +1874,7 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
#ifdef MODULE
- printk (KERN_INFO "%s", version);
+ pr_info("%s", version);
#endif
/* copy module parms into globals */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc355..a589dd34891e 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "uli526x"
#define DRV_VERSION "0.9.3"
#define DRV_RELDATE "2005-7-29"
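The pr_fmt() define has to land before the printk helpers are pulled in; from then on every pr_err()/pr_info() in the file is prefixed with the module name automatically, which is why the repeated DRV_NAME ": " strings below can be dropped. A minimal hypothetical module showing the effect:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		pr_info("loaded\n");	/* logged as "<modname>: loaded" */
		return 0;
	}
	module_init(example_init);

	MODULE_LICENSE("GPL");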
@@ -23,7 +25,6 @@
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -82,9 +83,16 @@
#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s */
#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s */
-#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define ULI526X_DBUG(dbug_now, msg, value) \
+do { \
+ if (uli526x_debug || (dbug_now)) \
+ pr_err("%s %lx\n", (msg), (long) (value)); \
+} while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_err("Change Speed to %sMhz %s duplex\n", \
+ mode & 1 ? "100" : "10", \
+ mode & 4 ? "full" : "half");
/* CR9 definition: SROM/MII */
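Wrapping the ULI526X_DBUG body in do { ... } while (0) is the standard way to make a multi-statement macro behave like a single statement, so it stays correct under an un-braced if/else. A sketch with hypothetical names (example_debug is a placeholder flag; the usual printk helpers are assumed):

	#define EXAMPLE_DBG(force, msg, val)				\
	do {								\
		if (example_debug || (force))				\
			pr_err("%s %lx\n", (msg), (long)(val));		\
	} while (0)

	/* Safe even where a bare if-body would mis-parse:
	 *	if (cond)
	 *		EXAMPLE_DBG(0, "state", reg);
	 *	else
	 *		do_something_else();
	 */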
@@ -284,7 +292,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -295,19 +303,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -382,9 +390,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
if (err)
goto err_out_res;
- printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n",
- dev->name,ent->driver_data >> 16,pci_name(pdev),
- dev->dev_addr, dev->irq);
+ dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -516,7 +524,7 @@ static void uli526x_init(struct net_device *dev)
}
}
if(phy_tmp == 32)
- printk(KERN_WARNING "Can not find the phy address!!!");
+ pr_warning("Can not find the phy address!!!");
/* Parse SROM and media mode */
db->media_mode = uli526x_media_mode;
@@ -548,7 +556,7 @@ static void uli526x_init(struct net_device *dev)
update_cr6(db->cr6_data, ioaddr);
/* Send setup frame */
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -582,7 +590,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -592,7 +600,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
return NETDEV_TX_BUSY;
}
@@ -842,13 +850,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
if ( !(rdes0 & 0x8000) ||
((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+ struct sk_buff *new_skb = NULL;
+
skb = rxptr->rx_skb_ptr;
/* Good packet, send to upper layer */
/* Short packet uses a new SKB */
- if ( (rxlen < RX_COPY_SIZE) &&
- ( (skb = dev_alloc_skb(rxlen + 2) )
- != NULL) ) {
+ if ((rxlen < RX_COPY_SIZE) &&
+ (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+ skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
memcpy(skb_put(skb, rxlen),
@@ -897,16 +907,18 @@ static void uli526x_set_filter_mode(struct net_device * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
- ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1058,7 +1070,7 @@ static void uli526x_timer(unsigned long data)
/* Link Failed */
ULI526X_DBUG(0, "Link Failed", tmp_cr12);
netif_carrier_off(dev);
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
db->link_failed = 1;
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1102,11 @@ static void uli526x_timer(unsigned long data)
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
}
else
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
}
netif_carrier_on(dev);
}
@@ -1104,7 +1116,7 @@ static void uli526x_timer(unsigned long data)
{
if(db->init==1)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
netif_carrier_off(dev);
}
}
@@ -1230,8 +1242,7 @@ static int uli526x_resume(struct pci_dev *pdev)
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
- printk(KERN_WARNING "%s: Could not put device into D0\n",
- dev->name);
+ dev_warn(&dev->dev, "Could not put device into D0\n");
return err;
}
@@ -1405,14 +1416,14 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
@@ -1432,7 +1443,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data, dev->base_addr);
dev->trans_start = jiffies;
} else
- printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+ pr_err("No Tx resource - Send_filter_frame!\n");
}
@@ -1783,7 +1794,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
}
-static struct pci_device_id uli526x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
{ 0, }
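DEFINE_PCI_DEVICE_TABLE() (from <linux/pci.h> in this kernel generation) expands to a const struct pci_device_id array placed in the __devinitconst section, so the switch is a cosmetic plus a section annotation. A hypothetical table for illustration:

	#include <linux/module.h>
	#include <linux/pci.h>

	static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
		{ PCI_DEVICE(0x10b9, 0x5261) },	/* vendor, device; the rest defaulted */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);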
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f9..98dbf6cc1d68 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -114,7 +114,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -218,7 +217,7 @@ enum chip_capability_flags {
CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
-static const struct pci_device_id w840_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +375,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
irq = pdev->irq;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
- pci_name(pdev));
+ pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
return -EIO;
}
dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +421,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (option & 0x200)
np->mii_if.full_duplex = 1;
if (option & 15)
- printk(KERN_INFO "%s: ignoring user supplied media type %d",
- dev->name, option & 15);
+ dev_info(&dev->dev,
+ "ignoring user supplied media type %d",
+ option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->mii_if.full_duplex = 1;
@@ -440,9 +440,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name, ioaddr,
- dev->dev_addr, irq);
+ dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
@@ -453,16 +452,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
mdio_read(dev, phy, MII_PHYSID2);
- printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
+ dev_info(&dev->dev,
+ "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
+ np->mii, phy, mii_status,
+ np->mii_if.advertising);
}
}
np->mii_cnt = phy_idx;
np->mii_if.phy_id = np->phys[0];
if (phy_idx == 0) {
- printk(KERN_WARNING "%s: MII PHY not found -- this device may "
- "not operate correctly.\n", dev->name);
+ dev_warn(&dev->dev,
+ "MII PHY not found -- this device may not operate correctly\n");
}
}
@@ -644,8 +644,8 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
- dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
+ dev->name, dev->irq);
if((i=alloc_ringdesc(dev)))
goto out_err;
@@ -657,7 +657,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+ printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
/* Set the timer to check for link beat. */
init_timer(&np->timer);
@@ -688,16 +688,18 @@ static int update_link(struct net_device *dev)
if (!(mii_reg & 0x4)) {
if (netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d reports no link. Disabling watchdog\n",
+ np->phys[0]);
netif_carrier_off(dev);
}
return np->csr6;
}
if (!netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d link is back. Enabling watchdog\n",
+ np->phys[0]);
netif_carrier_on(dev);
}
@@ -729,9 +731,10 @@ static int update_link(struct net_device *dev)
if (fasteth)
result |= 0x20000000;
if (result != np->csr6 && debug)
- printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
- duplex ? "full" : "half", np->phys[0]);
+ dev_info(&dev->dev,
+ "Setting %dMBit-%s-duplex based on MII#%d\n",
+ fasteth ? 100 : 10, duplex ? "full" : "half",
+ np->phys[0]);
return result;
}
@@ -763,8 +766,8 @@ static inline void update_csr6(struct net_device *dev, int new)
limit--;
if(!limit) {
- printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
- dev->name, csr5);
+ dev_info(&dev->dev,
+ "couldn't stop rxtx, IntrStatus %xh\n", csr5);
break;
}
udelay(1);
@@ -783,10 +786,9 @@ static void netdev_timer(unsigned long data)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
- "config %8.8x.\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
@@ -899,8 +901,8 @@ static void init_registers(struct net_device *dev)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
- printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
- "alignment to 8 longwords.\n", dev->name);
+ dev_info(&dev->dev,
+ "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
} else {
i |= 0xE000;
}
@@ -931,22 +933,23 @@ static void tx_timeout(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
+ dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
+ ioread32(ioaddr + IntrStatus));
{
int i;
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
- printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %8.8x", np->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %08x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
- printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
- np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
@@ -1055,8 +1058,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
+ dev->name, np->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -1073,8 +1076,8 @@ static void netdev_tx_done(struct net_device *dev)
if (tx_status & 0x8000) { /* There was an error, log it. */
#ifndef final_version
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, tx_status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, tx_status);
#endif
np->stats.tx_errors++;
if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1089,8 @@ static void netdev_tx_done(struct net_device *dev)
} else {
#ifndef final_version
if (debug > 3)
- printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
- dev->name, entry, tx_status);
+ printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
+ dev->name, entry, tx_status);
#endif
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1133,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
+ dev->name, intr_status);
if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
break;
@@ -1156,8 +1159,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
netdev_error(dev, intr_status);
if (--work_limit < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n", dev->name, intr_status);
+ dev_warn(&dev->dev,
+ "Too much work at interrupt, status=0x%04x\n",
+ intr_status);
/* Set the timer to re-enable the other interrupts after
10*82usec ticks. */
spin_lock(&np->lock);
@@ -1171,8 +1175,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread32(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1185,8 +1189,8 @@ static int netdev_rx(struct net_device *dev)
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
- printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
- entry, np->rx_ring[entry].status);
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1199,24 @@ static int netdev_rx(struct net_device *dev)
s32 status = desc->status;
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
- status);
+ printk(KERN_DEBUG " netdev_rx() status was %08x\n",
+ status);
if (status < 0)
break;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, entry %#x status %4.4x!\n",
- dev->name, np->cur_rx, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
+ np->cur_rx, status);
np->stats.rx_length_errors++;
}
} else if (status & 0x8000) {
/* There was a fatal error. */
if (debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
np->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) np->stats.rx_length_errors++;
if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1229,8 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- " status %x.\n", pkt_len, status);
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
@@ -1251,11 +1255,10 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (debug > 5)
- printk(KERN_DEBUG " Rx data %pM %pM"
- " %2.2x%2.2x %d.%d.%d.%d.\n",
+ printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
&skb->data[0], &skb->data[6],
skb->data[12], skb->data[13],
- skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
+ &skb->data[14]);
#endif
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1293,8 +1296,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
+ dev->name, intr_status);
if (intr_status == 0xffffffff)
return;
spin_lock(&np->lock);
@@ -1314,8 +1317,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
new = 127; /* load full packet before starting */
new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
- printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
- dev->name, new);
+ printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
+ dev->name, new);
update_csr6(dev, new);
}
if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1357,17 +1360,16 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
- int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
@@ -1487,11 +1489,13 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ dev->name,
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
}
/* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1516,16 @@ static int netdev_close(struct net_device *dev)
if (debug > 2) {
int i;
- printk(KERN_DEBUG" Tx ring at %8.8x:\n",
- (int)np->tx_ring);
+ printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
- i, np->tx_ring[i].length,
- np->tx_ring[i].status, np->tx_ring[i].buffer1);
- printk(KERN_DEBUG " Rx ring %8.8x:\n",
- (int)np->rx_ring);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].length,
- np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
}
}
#endif /* __i386__ debugging only */
@@ -1622,9 +1624,8 @@ static int w840_resume (struct pci_dev *pdev)
goto out; /* device not suspended */
if (netif_running(dev)) {
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR
- "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev,
+ "pci_enable_device failed in resume\n");
goto out;
}
spin_lock_irq(&np->lock);
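Most of the winbond-840 hunks follow one template: printk(KERN_xxx "%s: ...", dev->name, ...) becomes dev_info()/dev_warn()/dev_err() on &dev->dev, which adds a consistent device prefix for us and drops the trailing period. A hedged before/after sketch with a hypothetical helper:

	#include <linux/netdevice.h>

	static void example_report_link(struct net_device *dev, int mbps)
	{
		/* old: printk(KERN_INFO "%s: link up at %d Mb/s.\n", dev->name, mbps); */
		dev_info(&dev->dev, "link up at %d Mb/s\n", mbps);
	}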
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d6..acfeeb980562 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
* $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
-static struct pci_device_id xircom_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_write_config_word (pdev, PCI_STATUS,tmp16);
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
- printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
+ pr_err("%s: failed to allocate io-region\n", __func__);
return -ENODEV;
}
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
- printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n");
+ pr_err("%s: failed to allocate etherdev\n", __func__);
goto device_fail;
}
private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
+ pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail;
}
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
+ pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail;
}
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
- printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
+ pr_err("%s: netdevice registration failed\n", __func__);
goto reg_fail;
}
- printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
+ dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
+ printk("tx status 0x%08x 0x%08x \n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ printk("rx status 0x%08x 0x%08x \n",
+ card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
if (link_status_changed(card)) {
int newlink;
- printk(KERN_DEBUG "xircom_cb: Link status has changed \n");
+ printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
newlink = link_status(card);
- printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink);
+ dev_info(&dev->dev, "Link is %i mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
/* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n");
+ pr_err("Receiver failed to re-activate\n");
}
leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
/* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n");
+ pr_err("Transmitter failed to re-activate\n");
}
leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
struct sk_buff *skb;
if (pkt_len > 1518) {
- printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len);
+ pr_err("Packet length %i is bogus\n", pkt_len);
pkt_len = 1518;
}
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
if (status & 0x8000) { /* Major error */
- printk(KERN_ERR "Major transmit error status %x \n", status);
+ pr_err("Major transmit error status %x\n", status);
card->tx_buffer[4*descnr] = 0;
netif_wake_queue (dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f22210e..43265207d463 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -61,6 +61,7 @@
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
+#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -144,6 +145,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
tfile->tun = tun;
tun->tfile = tfile;
+ tun->socket.file = file;
dev_hold(tun->dev);
sock_hold(tun->socket.sk);
atomic_inc(&tfile->count);
@@ -158,6 +160,7 @@ static void __tun_detach(struct tun_struct *tun)
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
tun->tfile = NULL;
+ tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
@@ -364,6 +367,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (!check_filter(&tun->txflt, skb))
goto drop;
+ if (tun->socket.sk->sk_filter &&
+ sk_filter(tun->socket.sk, skb))
+ goto drop;
+
if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
if (!(tun->flags & TUN_ONE_QUEUE)) {
/* Normal queueing mode. */
@@ -380,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ /* Orphan the skb - required as we might hang on to it
+ * for an indefinite time. */
+ skb_orphan(skb);
+
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
dev->trans_start = jiffies;
@@ -387,7 +398,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->socket.wait);
+ wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
drop:
@@ -743,7 +755,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
len = min_t(int, skb->len, len);
skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += len;
+ total += skb->len;
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -751,34 +763,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
- if (file->f_flags & O_NONBLOCK) {
+ if (noblock) {
ret = -EAGAIN;
break;
}
@@ -805,6 +806,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
current->state = TASK_RUNNING;
remove_wait_queue(&tun->socket.wait, &wait);
+ return ret;
+}
+
+static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
+ ssize_t len, ret;
+
+ if (!tun)
+ return -EBADFD;
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
@@ -847,17 +869,49 @@ static void tun_sock_write_space(struct sock *sk)
return;
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
- tun = container_of(sk, struct tun_sock, sk)->tun;
+ tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
static void tun_sock_destruct(struct sock *sk)
{
- free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+ free_netdev(tun_sk(sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ return tun_get_user(tun, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops tun_socket_ops = {
+ .sendmsg = tun_sendmsg,
+ .recvmsg = tun_recvmsg,
+};
+
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
@@ -986,11 +1040,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
goto err_free_dev;
init_waitqueue_head(&tun->socket.wait);
+ tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
- container_of(sk, struct tun_sock, sk)->tun = tun;
+ tun_sk(sk)->tun = tun;
security_tun_dev_post_create(sk);
@@ -1116,6 +1171,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
+ struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
int ret;
@@ -1263,6 +1319,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNATTACHFILTER:
+ /* Can be set only for TAPs */
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = -EFAULT;
+ if (copy_from_user(&fprog, argp, sizeof(fprog)))
+ break;
+
+ ret = sk_attach_filter(&fprog, tun->socket.sk);
+ break;
+
+ case TUNDETACHFILTER:
+ /* Can be set only for TAPs */
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = sk_detach_filter(tun->socket.sk);
+ break;
+
default:
ret = -EINVAL;
break;
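TUNATTACHFILTER/TUNDETACHFILTER take a classic BPF program, the same struct sock_fprog used by SO_ATTACH_FILTER, and as the new cases above enforce they are accepted only on TAP (not TUN) devices. A hedged userspace sketch that attaches a trivial accept-everything filter to an already-configured TAP fd; the helper name and one-instruction program are placeholders:

	#include <linux/filter.h>
	#include <linux/if_tun.h>
	#include <sys/ioctl.h>

	static int example_attach_accept_all(int tap_fd)
	{
		struct sock_filter code[] = {
			{ 0x06, 0, 0, 0x0000ffff },	/* BPF_RET|BPF_K 0xffff: accept */
		};
		struct sock_fprog prog = {
			.len	= 1,
			.filter	= code,
		};

		return ioctl(tap_fd, TUNATTACHFILTER, &prog);	/* 0 on success */
	}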
@@ -1365,7 +1441,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
__tun_detach(tun);
- /* If desireable, unregister the netdevice. */
+ /* If desirable, unregister the netdevice. */
if (!(tun->flags & TUN_PERSIST)) {
rtnl_lock();
if (dev->reg_state == NETREG_REGISTERED)
@@ -1525,6 +1601,23 @@ static void tun_cleanup(void)
rtnl_link_unregister(&tun_link_ops);
}
+/* Get an underlying socket object from tun file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *tun_get_socket(struct file *file)
+{
+ struct tun_struct *tun;
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tun = tun_get(file);
+ if (!tun)
+ return ERR_PTR(-EBADFD);
+ tun_put(tun);
+ return &tun->socket;
+}
+EXPORT_SYMBOL_GPL(tun_get_socket);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
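tun_get_socket() is what lets another kernel module (a vhost-net style consumer) drive a tun/tap queue through sock_sendmsg()/sock_recvmsg() via the new tun_socket_ops. A hedged sketch of a caller, with error handling trimmed and a hypothetical helper name:

	#include <linux/file.h>
	#include <linux/err.h>
	#include <linux/if_tun.h>

	static struct socket *example_get_tap_socket(int fd)
	{
		struct file *file = fget(fd);
		struct socket *sock;

		if (!file)
			return ERR_PTR(-EBADF);

		sock = tun_get_socket(file);
		if (IS_ERR(sock)) {
			fput(file);
			return sock;
		}

		/* Per the comment above, the caller keeps holding the file
		 * reference for as long as it uses the returned socket. */
		return sock;
	}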
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be6..98d818daa77e 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -98,14 +98,10 @@ static const int multicast_filter_limit = 32;
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536
-
-#define DRV_MODULE_NAME "typhoon"
-#define DRV_MODULE_VERSION "1.5.9"
-#define DRV_MODULE_RELDATE "Mar 2, 2009"
-#define PFX DRV_MODULE_NAME ": "
-#define ERR_PFX KERN_ERR PFX
#define FIRMWARE_NAME "3com/typhoon.bin"
+#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -113,7 +109,6 @@ static const int multicast_filter_limit = 32;
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -132,14 +127,12 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <generated/utsrelease.h>
#include "typhoon.h"
-static char version[] __devinitdata =
- "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -161,8 +154,8 @@ module_param(use_mmio, int, 0);
#endif
struct typhoon_card_info {
- char *name;
- int capabilities;
+ const char *name;
+ const int capabilities;
};
#define TYPHOON_CRYPTO_NONE 0x00
@@ -215,7 +208,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
-static struct pci_device_id typhoon_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@ -299,7 +292,6 @@ struct typhoon {
struct basic_ring respRing;
struct net_device_stats stats;
struct net_device_stats stats_saved;
- const char * name;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
__le16 xcvr_select;
@@ -487,7 +479,7 @@ typhoon_hello(struct typhoon *tp)
typhoon_inc_cmd_index(&ring->lastWrite, 1);
INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
- smp_wmb();
+ wmb();
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
spin_unlock(&tp->command_lock);
}
@@ -534,13 +526,13 @@ typhoon_process_response(struct typhoon *tp, int resp_size,
} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
typhoon_hello(tp);
} else {
- printk(KERN_ERR "%s: dumping unexpected response "
- "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
- tp->name, le16_to_cpu(resp->cmd),
- resp->numDesc, resp->flags,
- le16_to_cpu(resp->parm1),
- le32_to_cpu(resp->parm2),
- le32_to_cpu(resp->parm3));
+ netdev_err(tp->dev,
+ "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
+ le16_to_cpu(resp->cmd),
+ resp->numDesc, resp->flags,
+ le16_to_cpu(resp->parm1),
+ le32_to_cpu(resp->parm2),
+ le32_to_cpu(resp->parm3));
}
cleanup:
@@ -606,9 +598,8 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
freeResp = typhoon_num_free_resp(tp);
if(freeCmd < num_cmd || freeResp < num_resp) {
- printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
- "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
- freeResp, num_resp);
+ netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
+ freeCmd, num_cmd, freeResp, num_resp);
err = -ENOMEM;
goto out;
}
@@ -733,7 +724,7 @@ typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
spin_unlock_bh(&tp->state_lock);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
- printk("%s: vlan offload error %d\n", tp->name, -err);
+ netdev_err(tp->dev, "vlan offload error %d\n", -err);
spin_lock_bh(&tp->state_lock);
}
@@ -924,17 +915,15 @@ typhoon_set_rx_mode(struct net_device *dev)
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
- } else if((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
- } else if(dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -1020,7 +1009,7 @@ typhoon_get_stats(struct net_device *dev)
return saved;
if(typhoon_do_get_stats(tp) < 0) {
- printk(KERN_ERR "%s: error getting stats\n", dev->name);
+ netdev_err(dev, "error getting stats\n");
return saved;
}
@@ -1062,8 +1051,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, UTS_RELEASE);
strcpy(info->bus_info, pci_name(pci_dev));
}
@@ -1321,13 +1310,15 @@ typhoon_init_interface(struct typhoon *tp)
tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
tp->card_state = Sleeping;
- smp_wmb();
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
spin_lock_init(&tp->command_lock);
spin_lock_init(&tp->state_lock);
+
+ /* Force the writes to the shared memory area out before continuing. */
+ wmb();
}
static void
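The smp_wmb() to wmb() changes matter because the ordering here is against the NIC reading shared memory over DMA, not just against other CPUs; smp_wmb() compiles away on UP kernels, wmb() does not. A hedged, generic descriptor-handoff sketch (all names are placeholders):

	#include <linux/io.h>

	static void example_kick_device(u32 *shared_desc_status, u32 tail,
					void __iomem *doorbell)
	{
		*shared_desc_status = 1;	/* fill the DMA-visible descriptor */
		wmb();				/* make it visible before the doorbell */
		iowrite32(tail, doorbell);	/* device may start reading now */
	}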
@@ -1365,8 +1356,8 @@ typhoon_request_firmware(struct typhoon *tp)
err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
if (err) {
- printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
- tp->name, FIRMWARE_NAME);
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ FIRMWARE_NAME);
return err;
}
@@ -1401,7 +1392,7 @@ typhoon_request_firmware(struct typhoon *tp)
return 0;
invalid_fw:
- printk(KERN_ERR "%s: Invalid firmware image\n", tp->name);
+ netdev_err(tp->dev, "Invalid firmware image\n");
release_firmware(typhoon_fw);
typhoon_fw = NULL;
return -EINVAL;
@@ -1438,7 +1429,7 @@ typhoon_download_firmware(struct typhoon *tp)
err = -ENOMEM;
dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
if(!dpage) {
- printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
+ netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
}
@@ -1451,7 +1442,7 @@ typhoon_download_firmware(struct typhoon *tp)
err = -ETIMEDOUT;
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: card ready timeout\n", tp->name);
+ netdev_err(tp->dev, "card ready timeout\n");
goto err_out_irq;
}
@@ -1491,8 +1482,7 @@ typhoon_download_firmware(struct typhoon *tp)
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
- printk(KERN_ERR "%s: segment ready timeout\n",
- tp->name);
+ netdev_err(tp->dev, "segment ready timeout\n");
goto err_out_irq;
}
@@ -1502,8 +1492,8 @@ typhoon_download_firmware(struct typhoon *tp)
* the checksum, we can do this once, at the end.
*/
csum = csum_fold(csum_partial_copy_nocheck(image_data,
- dpage, len,
- 0));
+ dpage, len,
+ 0));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum),
@@ -1514,7 +1504,7 @@ typhoon_download_firmware(struct typhoon *tp)
iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
- ioaddr + TYPHOON_REG_COMMAND);
+ ioaddr + TYPHOON_REG_COMMAND);
image_data += len;
load_addr += len;
@@ -1525,15 +1515,15 @@ typhoon_download_firmware(struct typhoon *tp)
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
- printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
+ netdev_err(tp->dev, "final segment ready timeout\n");
goto err_out_irq;
}
iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
- printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
- tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
goto err_out_irq;
}
@@ -1555,7 +1545,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
void __iomem *ioaddr = tp->ioaddr;
if(typhoon_wait_status(ioaddr, initial_status) < 0) {
- printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
+ netdev_err(tp->dev, "boot ready timeout\n");
goto out_timeout;
}
@@ -1566,8 +1556,8 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
- printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
- tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
goto out_timeout;
}
@@ -1866,8 +1856,7 @@ typhoon_interrupt(int irq, void *dev_instance)
typhoon_post_pci_writes(ioaddr);
__napi_schedule(&tp->napi);
} else {
- printk(KERN_ERR "%s: Error, poll already scheduled\n",
- dev->name);
+ netdev_err(dev, "Error, poll already scheduled\n");
}
return IRQ_HANDLED;
}
@@ -1900,16 +1889,15 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
xp_cmd.parm1 = events;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
- printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
- tp->name, err);
+ netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
+ err);
return err;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
- printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
- tp->name, err);
+ netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
return err;
}
@@ -1960,12 +1948,12 @@ typhoon_start_runtime(struct typhoon *tp)
err = typhoon_download_firmware(tp);
if(err < 0) {
- printk("%s: cannot load runtime on 3XP\n", tp->name);
+ netdev_err(tp->dev, "cannot load runtime on 3XP\n");
goto error_out;
}
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
- printk("%s: cannot boot 3XP\n", tp->name);
+ netdev_err(tp->dev, "cannot boot 3XP\n");
err = -EIO;
goto error_out;
}
@@ -2069,9 +2057,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
}
if(i == TYPHOON_WAIT_TIMEOUT)
- printk(KERN_ERR
- "%s: halt timed out waiting for Tx to complete\n",
- tp->name);
+ netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
@@ -2088,11 +2074,10 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
- printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
- tp->name);
+ netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
if(typhoon_reset(ioaddr, wait_type) < 0) {
- printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
+ netdev_err(tp->dev, "unable to reset 3XP\n");
return -ETIMEDOUT;
}
@@ -2111,9 +2096,8 @@ typhoon_tx_timeout(struct net_device *dev)
struct typhoon *tp = netdev_priv(dev);
if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
- printk(KERN_WARNING "%s: could not reset in tx timeout\n",
- dev->name);
- goto truely_dead;
+ netdev_warn(dev, "could not reset in tx timeout\n");
+ goto truly_dead;
}
/* If we ever start using the Hi ring, it will need cleaning too */
@@ -2121,15 +2105,14 @@ typhoon_tx_timeout(struct net_device *dev)
typhoon_free_rx_rings(tp);
if(typhoon_start_runtime(tp) < 0) {
- printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
- dev->name);
- goto truely_dead;
+ netdev_err(dev, "could not start runtime in tx timeout\n");
+ goto truly_dead;
}
netif_wake_queue(dev);
return;
-truely_dead:
+truly_dead:
/* Reset the hardware, and turn off carrier to avoid more timeouts */
typhoon_reset(tp->ioaddr, NoWait);
netif_carrier_off(dev);
@@ -2147,7 +2130,7 @@ typhoon_open(struct net_device *dev)
err = typhoon_wakeup(tp, WaitSleep);
if(err < 0) {
- printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
+ netdev_err(dev, "unable to wakeup device\n");
goto out_sleep;
}
@@ -2172,14 +2155,13 @@ out_irq:
out_sleep:
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: unable to reboot into sleep img\n",
- dev->name);
+ netdev_err(dev, "unable to reboot into sleep img\n");
typhoon_reset(tp->ioaddr, NoWait);
goto out;
}
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
- printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
+ netdev_err(dev, "unable to go back to sleep\n");
out:
return err;
@@ -2194,7 +2176,7 @@ typhoon_close(struct net_device *dev)
napi_disable(&tp->napi);
if(typhoon_stop_runtime(tp, WaitSleep) < 0)
- printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+ netdev_err(dev, "unable to stop runtime\n");
/* Make sure there is no irq handler running on a different CPU. */
free_irq(dev->irq, dev);
@@ -2203,10 +2185,10 @@ typhoon_close(struct net_device *dev)
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
- printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+ netdev_err(dev, "unable to boot sleep image\n");
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
- printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+ netdev_err(dev, "unable to put card to sleep\n");
return 0;
}
@@ -2224,14 +2206,12 @@ typhoon_resume(struct pci_dev *pdev)
return 0;
if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
- printk(KERN_ERR "%s: critical: could not wake up in resume\n",
- dev->name);
+ netdev_err(dev, "critical: could not wake up in resume\n");
goto reset;
}
if(typhoon_start_runtime(tp) < 0) {
- printk(KERN_ERR "%s: critical: could not start runtime in "
- "resume\n", dev->name);
+ netdev_err(dev, "critical: could not start runtime in resume\n");
goto reset;
}
@@ -2258,8 +2238,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
spin_lock_bh(&tp->state_lock);
if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
spin_unlock_bh(&tp->state_lock);
- printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
- dev->name);
+ netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
return -EBUSY;
}
spin_unlock_bh(&tp->state_lock);
@@ -2267,7 +2246,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
- printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+ netdev_err(dev, "unable to stop runtime\n");
goto need_resume;
}
@@ -2275,7 +2254,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+ netdev_err(dev, "unable to boot sleep image\n");
goto need_resume;
}
@@ -2283,21 +2262,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
- printk(KERN_ERR "%s: unable to set mac address in suspend\n",
- dev->name);
+ netdev_err(dev, "unable to set mac address in suspend\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
- printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
- dev->name);
+ netdev_err(dev, "unable to set rx filter in suspend\n");
goto need_resume;
}
if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
- printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+ netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
@@ -2351,7 +2328,7 @@ out_unmap:
out:
if(!mode)
- printk(KERN_INFO PFX "falling back to port IO\n");
+ pr_info("%s: falling back to port IO\n", pci_name(pdev));
return mode;
}
@@ -2371,7 +2348,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
static int __devinit
typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int did_version = 0;
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
@@ -2381,14 +2357,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[3];
int err = 0;
-
- if(!did_version++)
- printk(KERN_INFO "%s", version);
+ const char *err_msg;
dev = alloc_etherdev(sizeof(*tp));
if(dev == NULL) {
- printk(ERR_PFX "%s: unable to alloc new net device\n",
- pci_name(pdev));
+ err_msg = "unable to alloc new net device";
err = -ENOMEM;
goto error_out;
}
@@ -2396,57 +2369,48 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if(err < 0) {
- printk(ERR_PFX "%s: unable to enable device\n",
- pci_name(pdev));
+ err_msg = "unable to enable device";
goto error_out_dev;
}
err = pci_set_mwi(pdev);
if(err < 0) {
- printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
+ err_msg = "unable to set MWI";
goto error_out_disable;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if(err < 0) {
- printk(ERR_PFX "%s: No usable DMA configuration\n",
- pci_name(pdev));
+ err_msg = "No usable DMA configuration";
goto error_out_mwi;
}
/* sanity checks on IO and MMIO BARs
*/
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
- printk(ERR_PFX
- "%s: region #1 not a PCI IO resource, aborting\n",
- pci_name(pdev));
+ err_msg = "region #1 not a PCI IO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 0) < 128) {
- printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
- pci_name(pdev));
+ err_msg = "Invalid PCI IO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- printk(ERR_PFX
- "%s: region #1 not a PCI MMIO resource, aborting\n",
- pci_name(pdev));
+ err_msg = "region #1 not a PCI MMIO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 1) < 128) {
- printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
- pci_name(pdev));
+ err_msg = "Invalid PCI MMIO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- err = pci_request_regions(pdev, "typhoon");
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
if(err < 0) {
- printk(ERR_PFX "%s: could not request regions\n",
- pci_name(pdev));
+ err_msg = "could not request regions";
goto error_out_mwi;
}
@@ -2457,8 +2421,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, use_mmio, 128);
if (!ioaddr) {
- printk(ERR_PFX "%s: cannot remap registers, aborting\n",
- pci_name(pdev));
+ err_msg = "cannot remap registers, aborting";
err = -EIO;
goto error_out_regions;
}
@@ -2468,8 +2431,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
&shared_dma);
if(!shared) {
- printk(ERR_PFX "%s: could not allocate DMA memory\n",
- pci_name(pdev));
+ err_msg = "could not allocate DMA memory";
err = -ENOMEM;
goto error_out_remap;
}
@@ -2492,7 +2454,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* 5) Put the card to sleep.
*/
if (typhoon_reset(ioaddr, WaitSleep) < 0) {
- printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
+ err_msg = "could not reset 3XP";
err = -EIO;
goto error_out_dma;
}
@@ -2504,26 +2466,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
- /* dev->name is not valid until we register, but we need to
- * use some common routines to initialize the card. So that those
- * routines print the right name, we keep our oun pointer to the name
- */
- tp->name = pci_name(pdev);
-
typhoon_init_interface(tp);
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
- pci_name(pdev));
+ err_msg = "cannot boot 3XP sleep image";
err = -EIO;
goto error_out_reset;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
- printk(ERR_PFX "%s: cannot read MAC address\n",
- pci_name(pdev));
+ err_msg = "cannot read MAC address";
err = -EIO;
goto error_out_reset;
}
@@ -2532,8 +2486,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
if(!is_valid_ether_addr(dev->dev_addr)) {
- printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
- "aborting\n", pci_name(pdev));
+ err_msg = "Could not obtain valid ethernet address, aborting";
goto error_out_reset;
}
@@ -2542,8 +2495,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- printk(ERR_PFX "%s: Could not get Sleep Image version\n",
- pci_name(pdev));
+ err_msg = "Could not get Sleep Image version";
goto error_out_reset;
}
@@ -2560,8 +2512,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
- printk(ERR_PFX "%s: cannot put adapter to sleep\n",
- pci_name(pdev));
+ err_msg = "cannot put adapter to sleep";
err = -EIO;
goto error_out_reset;
}
@@ -2580,19 +2531,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_TSO;
- if(register_netdev(dev) < 0)
+ if(register_netdev(dev) < 0) {
+ err_msg = "unable to register netdev";
goto error_out_reset;
-
- /* fixup our local name */
- tp->name = dev->name;
+ }
pci_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
- dev->name, typhoon_card_info[card_id].name,
- use_mmio ? "MMIO" : "IO",
- (unsigned long long)pci_resource_start(pdev, use_mmio),
- dev->dev_addr);
+ netdev_info(dev, "%s at %s 0x%llx, %pM\n",
+ typhoon_card_info[card_id].name,
+ use_mmio ? "MMIO" : "IO",
+ (unsigned long long)pci_resource_start(pdev, use_mmio),
+ dev->dev_addr);
/* xp_resp still contains the response to the READ_VERSIONS command.
* For debugging, let the user know what version he has.
@@ -2602,23 +2552,20 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* of version is Month/Day of build.
*/
u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
- printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
- "%02u/%02u/2000\n", dev->name, monthday >> 8,
- monthday & 0xff);
+ netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
+ monthday >> 8, monthday & 0xff);
} else if(xp_resp[0].numDesc == 2) {
/* This is the Typhoon 1.1+ type Sleep Image
*/
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
u8 *ver_string = (u8 *) &xp_resp[1];
ver_string[25] = 0;
- printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
- "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
- (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
- ver_string);
+ netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver & 0xfff, ver_string);
} else {
- printk(KERN_WARNING "%s: Unknown Sleep Image version "
- "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
- le32_to_cpu(xp_resp[0].parm2));
+ netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
+ xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
}
return 0;
@@ -2640,6 +2587,7 @@ error_out_disable:
error_out_dev:
free_netdev(dev);
error_out:
+ pr_err("%s: %s\n", pci_name(pdev), err_msg);
return err;
}
@@ -2664,7 +2612,7 @@ typhoon_remove_one(struct pci_dev *pdev)
}
static struct pci_driver typhoon_driver = {
- .name = DRV_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.id_table = typhoon_pci_tbl,
.probe = typhoon_init_one,
.remove = __devexit_p(typhoon_remove_one),
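The typhoon hunks above follow the tree-wide move from raw printk() calls carrying dev->name (or pci_name()) to the netdev_* logging helpers. A minimal sketch of that pattern, with a hypothetical function and message text not taken from the driver:

/* Illustrative only: the netdev_* helpers prefix the driver and device name,
 * so the format string drops the leading "%s: " + dev->name argument. */
#include <linux/netdevice.h>
#include <linux/errno.h>

static void example_report_error(struct net_device *dev, int err)
{
	/* old: printk(KERN_ERR "%s: command failed: %d\n", dev->name, err); */
	netdev_err(dev, "command failed: %d\n", err);

	if (err == -ETIMEDOUT)
		netdev_warn(dev, "device did not respond in time\n");
}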
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088b72ea..1b0aef37e495 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
@@ -429,7 +430,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
/* Ethernet frames are defined in Little Endian mode,
- therefor to insert */
+ therefore to insert */
/* the address to the hash (Big Endian mode), we reverse the bytes.*/
set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
- u32 upsmr, maccfg2, tbiBaseAddress;
+ u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Note that this depends on proper setting in utbipar register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- tbiBaseAddress = in_be32(&ug_regs->utbipar);
- tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
- tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
- value = ugeth->phydev->bus->read(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ ugeth_warn("TBI mode requires that the device "
+ "tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ ugeth_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
- ugeth->phydev->bus->write(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1563,7 +1570,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Wait for and prevent any further xmits. */
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1587,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_tx_wake_all_queues(ugeth->ndev);
+ netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -1648,25 +1658,28 @@ static void adjust_link(struct net_device *dev)
ugeth->oldspeed = phydev->speed;
}
- /*
- * To change the MAC configuration we need to disable the
- * controller. To do so, we have to either grab ugeth->lock,
- * which is a bad idea since 'graceful stop' commands might
- * take quite a while, or we can quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
-
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
@@ -1989,7 +2002,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
struct dev_mc_list *dmi;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
- int i;
ugeth = netdev_priv(dev);
@@ -2016,10 +2028,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- dmi = dev->mc_list;
-
- for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
-
+ netdev_for_each_mc_addr(dmi, dev) {
/* Only support group multicast for now.
*/
if (!(dmi->dmi_addr[0] & 1))
@@ -3273,13 +3282,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
break;
dev->stats.tx_packets++;
- skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
-
if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
skb_recycle_check(skb,
ugeth->ug_info->uf_info.max_rx_buf_length +
@@ -3601,6 +3609,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
if (!netif_running(ndev))
return 0;
+ netif_device_detach(ndev);
napi_disable(&ugeth->napi);
/*
@@ -3659,7 +3668,7 @@ static int ucc_geth_resume(struct of_device *ofdev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
- netif_start_queue(ndev);
+ netif_device_attach(ndev);
return 0;
}
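The reworked ugeth_quiesce()/ugeth_activate() pair above leans on netif_device_detach()/netif_device_attach() to fence off transmits while the MAC registers are rewritten, instead of holding ugeth->lock across a potentially slow graceful stop. A rough sketch of that sequence under hypothetical names, not the driver's actual code:

/* Sketch: stop the stack from transmitting while reconfiguring a MAC. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void example_reconfigure_mac(struct net_device *ndev, int irq)
{
	netif_device_detach(ndev);	/* stack stops calling ndo_start_xmit */
	netif_tx_disable(ndev);		/* wait for in-flight transmits to drain */
	disable_irq(irq);		/* keep the handler from rescheduling NAPI */

	/* ... write the new MAC configuration registers here ... */

	enable_irq(irq);
	netif_device_attach(ndev);	/* restart the queues that were running */
}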
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a007e2acf651..ef1fbeb11c6e 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics {
using the maximum is
easier */
#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
-#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
-#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
is a
guess
*/
@@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 128
+#define UCC_GETH_UTFTT_INIT 512
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
-#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
FIFO size */
-#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
-#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
#define UCC_GETH_REMODER_INIT 0 /* bits that must be
set */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 7075f26e97da..6f92e48f02d3 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 32d93564a74d..5d58abc224f4 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -204,6 +204,14 @@ config USB_NET_DM9601
This option adds support for Davicom DM9601 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SMSC75XX
+ tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ help
+ This option adds support for SMSC LAN75XX based USB 2.0
+ Gigabit Ethernet adapters.
+
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
@@ -377,4 +385,26 @@ config USB_CDC_PHONET
cellular modem, as found on most Nokia handsets with the
"PC suite" USB profile.
+config USB_IPHETH
+ tristate "Apple iPhone USB Ethernet driver"
+ default n
+ ---help---
+ Module used to share Internet connection (tethering) from your
+ iPhone (Original, 3G and 3GS) to your system.
+ Note that you need userspace libraries and programs that pair your
+ device with your system and that understand the iPhone protocol.
+
+ For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+
+config USB_SIERRA_NET
+ tristate "USB-to-WWAN Driver for Sierra Wireless modems"
+ depends on USB_USBNET
+ default y
+ help
+ Choose this option if you have a Sierra Wireless USB-to-WWAN device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sierra_net.
+
endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e17afb78f372..b13a279663ba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_NET_AX8817X) += asix.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
+obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
obj-$(CONFIG_USB_NET_NET1080) += net1080.o
@@ -22,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
+obj-$(CONFIG_USB_IPHETH) += ipheth.o
+obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index a516185cbc9f..35f56fc82803 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -34,6 +34,7 @@
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
#define DRIVER_VERSION "14-Jun-2006"
static const char driver_name [] = "asix";
@@ -54,6 +55,7 @@ static const char driver_name [] = "asix";
#define AX_CMD_WRITE_IPG0 0x12
#define AX_CMD_WRITE_IPG1 0x13
#define AX_CMD_READ_NODE_ID 0x13
+#define AX_CMD_WRITE_NODE_ID 0x14
#define AX_CMD_WRITE_IPG2 0x14
#define AX_CMD_WRITE_MULTI_FILTER 0x16
#define AX88172_CMD_READ_NODE_ID 0x17
@@ -165,6 +167,7 @@ static const char driver_name [] = "asix";
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
u8 phymode;
u8 ledmode;
u8 eeprom_len;
@@ -184,8 +187,8 @@ static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void *buf;
int err = -ENOMEM;
- devdbg(dev,"asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
@@ -217,8 +220,8 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void *buf = NULL;
int err = -ENOMEM;
- devdbg(dev,"asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
if (data) {
buf = kmalloc(size, GFP_KERNEL);
@@ -264,15 +267,15 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
int status;
struct urb *urb;
- devdbg(dev,"asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
- deverr(dev, "Error allocating URB in write_cmd_async!");
+ netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
return;
}
if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
- deverr(dev, "Failed to allocate memory for control request");
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
}
@@ -289,8 +292,8 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
asix_async_cmd_callback, req);
if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- deverr(dev, "Error submitting the control message: status=%d",
- status);
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
kfree(req);
usb_free_urb(urb);
}
@@ -314,7 +317,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
while (skb->len > 0) {
if ((short)(header & 0x0000ffff) !=
~((short)((header & 0xffff0000) >> 16))) {
- deverr(dev,"asix_rx_fixup() Bad Header Length");
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
}
/* get the packet length */
size = (u16) (header & 0x0000ffff);
@@ -322,7 +325,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((skb->len) - ((size + 1) & 0xfffe) == 0)
return 2;
if (size > ETH_FRAME_LEN) {
- deverr(dev,"asix_rx_fixup() Bad RX Length %d", size);
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
return 0;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
@@ -348,7 +352,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (skb->len < 0) {
- deverr(dev,"asix_rx_fixup() Bad SKB Length %d", skb->len);
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+ skb->len);
return 0;
}
return 1;
@@ -409,7 +414,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
usbnet_defer_kevent (dev, EVENT_LINK_RESET );
} else
netif_carrier_off(dev->net);
- devdbg(dev, "Link Status is: %d", link);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
}
@@ -418,7 +423,7 @@ static inline int asix_set_sw_mii(struct usbnet *dev)
int ret;
ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to enable software MII access");
+ netdev_err(dev->net, "Failed to enable software MII access\n");
return ret;
}
@@ -427,7 +432,7 @@ static inline int asix_set_hw_mii(struct usbnet *dev)
int ret;
ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to enable hardware MII access");
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
return ret;
}
@@ -436,13 +441,14 @@ static inline int asix_get_phy_addr(struct usbnet *dev)
u8 buf[2];
int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
- devdbg(dev, "asix_get_phy_addr()");
+ netdev_dbg(dev->net, "asix_get_phy_addr()\n");
if (ret < 0) {
- deverr(dev, "Error reading PHYID register: %02x", ret);
+ netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
}
- devdbg(dev, "asix_get_phy_addr() returning 0x%04x", *((__le16 *)buf));
+ netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
+ *((__le16 *)buf));
ret = buf[1];
out:
@@ -455,7 +461,7 @@ static int asix_sw_reset(struct usbnet *dev, u8 flags)
ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
if (ret < 0)
- deverr(dev,"Failed to send software reset: %02x", ret);
+ netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
return ret;
}
@@ -466,7 +472,7 @@ static u16 asix_read_rx_ctl(struct usbnet *dev)
int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
if (ret < 0) {
- deverr(dev, "Error reading RX_CTL register: %02x", ret);
+ netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
goto out;
}
ret = le16_to_cpu(v);
@@ -478,11 +484,11 @@ static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
{
int ret;
- devdbg(dev,"asix_write_rx_ctl() - mode = 0x%04x", mode);
+ netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write RX_CTL mode to 0x%04x: %02x",
- mode, ret);
+ netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
+ mode, ret);
return ret;
}
@@ -493,7 +499,8 @@ static u16 asix_read_medium_status(struct usbnet *dev)
int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
if (ret < 0) {
- deverr(dev, "Error reading Medium Status register: %02x", ret);
+ netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
+ ret);
goto out;
}
ret = le16_to_cpu(v);
@@ -505,11 +512,11 @@ static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
{
int ret;
- devdbg(dev,"asix_write_medium_mode() - mode = 0x%04x", mode);
+ netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write Medium Mode mode to 0x%04x: %02x",
- mode, ret);
+ netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
+ mode, ret);
return ret;
}
@@ -518,11 +525,11 @@ static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
{
int ret;
- devdbg(dev,"asix_write_gpio() - value = 0x%04x", value);
+ netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write GPIO value 0x%04x: %02x",
- value, ret);
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
+ value, ret);
if (sleep)
msleep(sleep);
@@ -542,29 +549,27 @@ static void asix_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= AX_RX_CTL_AMALL;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -588,7 +593,8 @@ static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
asix_set_hw_mii(dev);
mutex_unlock(&dev->phy_mutex);
- devdbg(dev, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x", phy_id, loc, le16_to_cpu(res));
+ netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
return le16_to_cpu(res);
}
@@ -599,7 +605,8 @@ asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- devdbg(dev, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x", phy_id, loc, val);
+ netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
asix_set_sw_mii(dev);
asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
@@ -728,6 +735,30 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
+static int asix_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
@@ -754,29 +785,27 @@ static void ax88172_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -800,7 +829,8 @@ static int ax88172_link_reset(struct usbnet *dev)
if (ecmd.duplex != DUPLEX_FULL)
mode |= ~AX88172_MEDIUM_FD;
- devdbg(dev, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -902,7 +932,8 @@ static int ax88772_link_reset(struct usbnet *dev)
if (ecmd.duplex != DUPLEX_FULL)
mode &= ~AX_MEDIUM_FD;
- devdbg(dev, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -915,7 +946,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = asix_set_multicast,
@@ -1059,10 +1090,10 @@ static int marvell_phy_init(struct usbnet *dev)
struct asix_data *data = (struct asix_data *)&dev->data;
u16 reg;
- devdbg(dev,"marvell_phy_init()");
+ netdev_dbg(dev->net, "marvell_phy_init()\n");
reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
- devdbg(dev,"MII_MARVELL_STATUS = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);
@@ -1070,7 +1101,7 @@ static int marvell_phy_init(struct usbnet *dev)
if (data->ledmode) {
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
- devdbg(dev,"MII_MARVELL_LED_CTRL (1) = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n", reg);
reg &= 0xf8ff;
reg |= (1 + 0x0100);
@@ -1079,7 +1110,7 @@ static int marvell_phy_init(struct usbnet *dev)
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
- devdbg(dev,"MII_MARVELL_LED_CTRL (2) = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
reg &= 0xfc0f;
}
@@ -1090,7 +1121,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
{
u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);
- devdbg(dev, "marvell_led_status() read 0x%04x", reg);
+ netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);
/* Clear out the center LED bits - 0x03F0 */
reg &= 0xfc0f;
@@ -1106,7 +1137,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
reg |= 0x02f0;
}
- devdbg(dev, "marvell_led_status() writing 0x%04x", reg);
+ netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);
return 0;
@@ -1118,7 +1149,7 @@ static int ax88178_link_reset(struct usbnet *dev)
struct ethtool_cmd ecmd;
struct asix_data *data = (struct asix_data *)&dev->data;
- devdbg(dev,"ax88178_link_reset()");
+ netdev_dbg(dev->net, "ax88178_link_reset()\n");
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -1138,7 +1169,8 @@ static int ax88178_link_reset(struct usbnet *dev)
else
mode &= ~AX_MEDIUM_FD;
- devdbg(dev, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -1188,7 +1220,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
struct usbnet *dev = netdev_priv(net);
int ll_mtu = new_mtu + net->hard_header_len + 4;
- devdbg(dev, "ax88178_change_mtu() new_mtu=%d", new_mtu);
+ netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);
if (new_mtu <= 0 || ll_mtu > 16384)
return -EINVAL;
@@ -1208,7 +1240,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = asix_set_multicast,
.ndo_do_ioctl = asix_ioctl,
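The multicast rework in asix.c (and in catc.c and dm9601.c below) replaces the open-coded mc_list/mc_count walk with netdev_for_each_mc_addr(). A condensed sketch of the CRC hash-filter loop as it looks in this kernel generation, where the iterator still yields struct dev_mc_list; the 8-byte filter buffer and function name are hypothetical:

/* Sketch: build a 64-bit multicast hash filter with netdev_for_each_mc_addr(). */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void example_build_mc_filter(struct net_device *net, u8 filter[8])
{
	struct dev_mc_list *mc_list;
	u32 crc_bits;

	memset(filter, 0, 8);
	netdev_for_each_mc_addr(mc_list, net) {
		/* top 6 bits of the Ethernet CRC index one of 64 filter bits */
		crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
		filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
	}
}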
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a810..602e123b2741 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -44,6 +43,7 @@
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/gfp.h>
#include <asm/uaccess.h>
#undef DEBUG
@@ -632,7 +632,6 @@ static void catc_set_multicast_list(struct net_device *netdev)
struct dev_mc_list *mc;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- int i;
memset(broadcast, 0xff, 6);
memset(catc->multicast, 0, 64);
@@ -648,7 +647,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
@@ -897,11 +896,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
f5u011_rxmode(catc, catc->rxmode);
}
dbg("Init done.");
- printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ",
+ printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
- usbdev->bus->bus_name, usbdev->devpath);
- for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
- printk("%2.2x.\n", netdev->dev_addr[i]);
+ usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 6491c9c00c83..dc9444525b49 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index c337ffc3304a..5f3b97668e63 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -30,6 +30,7 @@
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
+#include <linux/gfp.h>
/*
@@ -73,7 +74,7 @@ static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
usb_free_urb(urb);
fail:
dev_kfree_skb(skb);
- devwarn(dev, "link cmd failure\n");
+ netdev_warn(dev->net, "link cmd failure\n");
return;
}
}
@@ -212,7 +213,8 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* b15: 1 (EEM command)
*/
if (header & BIT(14)) {
- devdbg(dev, "reserved command %04x\n", header);
+ netdev_dbg(dev->net, "reserved command %04x\n",
+ header);
continue;
}
@@ -255,8 +257,9 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
case 1: /* Echo response */
case 5: /* Tickle */
default: /* reserved */
- devwarn(dev, "unexpected link command %d\n",
- bmEEMCmd);
+ netdev_warn(dev->net,
+ "unexpected link command %d\n",
+ bmEEMCmd);
continue;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e183a83b99..3547cf13d219 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -339,10 +339,10 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
static void dumpspeed(struct usbnet *dev, __le32 *speeds)
{
- if (netif_msg_timer(dev))
- devinfo(dev, "link speeds: %u kbps up, %u kbps down",
- __le32_to_cpu(speeds[0]) / 1000,
- __le32_to_cpu(speeds[1]) / 1000);
+ netif_info(dev, timer, dev->net,
+ "link speeds: %u kbps up, %u kbps down\n",
+ __le32_to_cpu(speeds[0]) / 1000,
+ __le32_to_cpu(speeds[1]) / 1000);
}
static void cdc_status(struct usbnet *dev, struct urb *urb)
@@ -361,18 +361,16 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
event = urb->transfer_buffer;
switch (event->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
- if (netif_msg_timer(dev))
- devdbg(dev, "CDC: carrier %s",
- event->wValue ? "on" : "off");
+ netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
+ event->wValue ? "on" : "off");
if (event->wValue)
netif_carrier_on(dev->net);
else
netif_carrier_off(dev->net);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
- if (netif_msg_timer(dev))
- devdbg(dev, "CDC: speed change (len %d)",
- urb->actual_length);
+ netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
+ urb->actual_length);
if (urb->actual_length != (sizeof *event + 8))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
@@ -382,8 +380,8 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
* but there are no standard formats for the response data.
*/
default:
- deverr(dev, "CDC: unexpected notification %02x!",
- event->bNotificationType);
+ netdev_err(dev->net, "CDC: unexpected notification %02x!\n",
+ event->bNotificationType);
break;
}
}
@@ -419,7 +417,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
- .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER,
// .check_connect = cdc_check_connect,
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
@@ -433,6 +431,7 @@ static const struct driver_info mbm_info = {
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
+ .manage_power = cdc_manage_power,
};
/*-------------------------------------------------------------------------*/
@@ -584,6 +583,11 @@ static const struct usb_device_id products [] = {
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &mbm_info,
}, {
+ /* Ericsson C3607w ver 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &mbm_info,
+}, {
/* Toshiba F3507g */
USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
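The cdc_ether hunks above also drop the open-coded "if (netif_msg_timer(dev)) devdbg(...)" tests in favour of the netif_dbg()/netif_info() macros, which fold the msg_enable check into the call. A hedged sketch with a hypothetical private structure:

/* Sketch: netif_info()/netif_dbg() test priv->msg_enable internally. */
#include <linux/netdevice.h>

struct example_priv {
	struct net_device *netdev;
	u32 msg_enable;		/* NETIF_MSG_* bits, typically set via ethtool */
};

static void example_report_link(struct example_priv *priv, bool up)
{
	/* old style: if (netif_msg_link(priv))
	 *                    printk(KERN_INFO "%s: link is up\n", priv->netdev->name); */
	netif_info(priv, link, priv->netdev, "link is %s\n", up ? "up" : "down");
}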
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f29..04b281002a76 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -21,6 +21,7 @@
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
/* datasheet:
http://ptm2.cc.utu.fi/ftp/network/cards/DM9601/From_NET/DM9601-DS-P01-930914.pdf
@@ -58,7 +59,7 @@ static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
void *buf;
int err = -ENOMEM;
- devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
buf = kmalloc(length, GFP_KERNEL);
if (!buf)
@@ -89,7 +90,7 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
void *buf = NULL;
int err = -ENOMEM;
- devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
if (data) {
buf = kmalloc(length, GFP_KERNEL);
@@ -112,7 +113,8 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- devdbg(dev, "dm_write_reg() reg=0x%02x, value=0x%02x", reg, value);
+ netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
+ reg, value);
return usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
DM_WRITE_REG,
@@ -142,13 +144,13 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- deverr(dev, "Error allocating URB in dm_write_async_helper!");
+ netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
return;
}
req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
if (!req) {
- deverr(dev, "Failed to allocate memory for control request");
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
}
@@ -166,8 +168,8 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- deverr(dev, "Error submitting the control message: status=%d",
- status);
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
kfree(req);
usb_free_urb(urb);
}
@@ -175,15 +177,15 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- devdbg(dev, "dm_write_async() reg=0x%02x length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
dm_write_async_helper(dev, reg, 0, length, data);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- devdbg(dev, "dm_write_reg_async() reg=0x%02x value=0x%02x",
- reg, value);
+ netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
+ reg, value);
dm_write_async_helper(dev, reg, value, 0, NULL);
}
@@ -211,7 +213,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
}
if (i == DM_TIMEOUT) {
- deverr(dev, "%s read timed out!", phy ? "phy" : "eeprom");
+ netdev_err(dev->net, "%s read timed out!\n", phy ? "phy" : "eeprom");
ret = -EIO;
goto out;
}
@@ -219,8 +221,8 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
ret = dm_read(dev, DM_SHARED_DATA, 2, value);
- devdbg(dev, "read shared %d 0x%02x returned 0x%04x, %d",
- phy, reg, *value, ret);
+ netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
+ phy, reg, *value, ret);
out:
mutex_unlock(&dev->phy_mutex);
@@ -254,7 +256,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
}
if (i == DM_TIMEOUT) {
- deverr(dev, "%s write timed out!", phy ? "phy" : "eeprom");
+ netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
ret = -EIO;
goto out;
}
@@ -304,15 +306,15 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
__le16 res;
if (phy_id) {
- devdbg(dev, "Only internal phy supported");
+ netdev_dbg(dev->net, "Only internal phy supported\n");
return 0;
}
dm_read_shared_word(dev, 1, loc, &res);
- devdbg(dev,
- "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x",
- phy_id, loc, le16_to_cpu(res));
+ netdev_dbg(dev->net,
+ "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
return le16_to_cpu(res);
}
@@ -324,12 +326,12 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
__le16 res = cpu_to_le16(val);
if (phy_id) {
- devdbg(dev, "Only internal phy supported");
+ netdev_dbg(dev->net, "Only internal phy supported\n");
return;
}
- devdbg(dev,"dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x",
- phy_id, loc, val);
+ netdev_dbg(dev->net, "dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
dm_write_shared_word(dev, 1, loc, res);
}
@@ -381,13 +383,13 @@ static void dm9601_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x02;
- } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) {
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
- } else if (net->mc_count) {
- struct dev_mc_list *mc_list = net->mc_list;
- int i;
+ } else if (!netdev_mc_empty(net)) {
+ struct dev_mc_list *mc_list;
- for (i = 0; i < net->mc_count; i++, mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, net) {
u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
@@ -592,7 +594,7 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
}
else
netif_carrier_off(dev->net);
- devdbg(dev, "Link Status is: %d", link);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
}
@@ -603,8 +605,8 @@ static int dm9601_link_reset(struct usbnet *dev)
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- devdbg(dev, "link_reset() speed: %d duplex: %d",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
+ ecmd.speed, ecmd.duplex);
return 0;
}
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index f7ccfad9384e..dcd57c37ef73 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -30,6 +30,7 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
+#include <linux/gfp.h>
/*
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f78f0903b073..be0cc99e881a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -286,6 +286,7 @@ struct hso_device {
u8 usb_gone;
struct work_struct async_get_intf;
struct work_struct async_put_intf;
+ struct work_struct reset_device;
struct usb_device *usb;
struct usb_interface *interface;
@@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial);
/* Helper functions */
static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
struct usb_device *usb, gfp_t gfp);
-static void log_usb_status(int status, const char *function);
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev);
static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
int type, int dir);
static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
@@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data);
static int hso_put_activity(struct hso_device *hso_dev);
static int hso_get_activity(struct hso_device *hso_dev);
static void tiocmget_intr_callback(struct urb *urb);
+static void reset_device(struct work_struct *data);
/*****************************************************************************/
/* Helping functions */
/*****************************************************************************/
@@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
{USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7701)},
+ {USB_DEVICE(0x0af0, 0x7706)},
{USB_DEVICE(0x0af0, 0x7801)},
{USB_DEVICE(0x0af0, 0x7901)},
+ {USB_DEVICE(0x0af0, 0x7A01)},
+ {USB_DEVICE(0x0af0, 0x7A05)},
{USB_DEVICE(0x0af0, 0x8200)},
{USB_DEVICE(0x0af0, 0x8201)},
+ {USB_DEVICE(0x0af0, 0x8300)},
+ {USB_DEVICE(0x0af0, 0x8302)},
+ {USB_DEVICE(0x0af0, 0x8304)},
+ {USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0xd157)},
{USB_DEVICE(0x0af0, 0xd257)},
{USB_DEVICE(0x0af0, 0xd357)},
+ {USB_DEVICE(0x0af0, 0xd058)},
+ {USB_DEVICE(0x0af0, 0xc100)},
{}
};
MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
spin_unlock_irqrestore(&serial_table_lock, flags);
}
-/* log a meaningful explanation of an USB status */
-static void log_usb_status(int status, const char *function)
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev)
{
char *explanation;
@@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function)
case -EMSGSIZE:
explanation = "internal error";
break;
+ case -EILSEQ:
+ case -EPROTO:
+ case -ETIME:
+ case -ETIMEDOUT:
+ explanation = "protocol error";
+ if (hso_dev)
+ schedule_work(&hso_dev->reset_device);
+ break;
default:
explanation = "unknown status";
break;
}
+
+ /* log a meaningful explanation of a USB status */
D1("%s: received USB status - %s (%d)", function, explanation, status);
}
@@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb)
/* log status, but don't act on it, we don't need to resubmit anything
* anyhow */
if (status)
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
hso_put_activity(odev->parent);
@@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
if (result) {
dev_warn(&odev->parent->interface->dev,
- "failed mux_bulk_tx_urb %d", result);
+ "failed mux_bulk_tx_urb %d\n", result);
net->stats.tx_errors++;
netif_start_queue(net);
} else {
@@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb)
/* is al ok? (Filip: Who's Al ?) */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
return;
}
@@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb)
if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
u32 rest;
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
+ rest = urb->actual_length %
+ le16_to_cpu(odev->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb)
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_warn(&odev->parent->interface->dev,
- "%s failed submit mux_bulk_rx_urb %d", __func__,
+ "%s failed submit mux_bulk_rx_urb %d\n", __func__,
result);
}
@@ -1132,9 +1155,6 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
{
int result;
-#ifdef CONFIG_HSO_AUTOPM
- usb_mark_last_busy(urb->dev);
-#endif
/* We are done with this URB, resubmit it. Prep the USB to wait for
* another frame */
usb_fill_bulk_urb(urb, serial->parent->usb,
@@ -1207,7 +1227,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
D1("serial == NULL");
return;
} else if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
@@ -1225,7 +1245,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
rest =
urb->actual_length %
- serial->in_endp->wMaxPacketSize;
+ le16_to_cpu(serial->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1513,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb)
if (!serial)
return;
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
tiocmget = serial->tiocmget;
@@ -1700,6 +1720,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
D1("no tty structures");
return -EINVAL;
}
+
+ if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
+ return -EINVAL;
+
if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1838,7 +1862,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
if (result) {
dev_err(&ctrl_urb->dev->dev,
- "%s failed submit ctrl_urb %d type %d", __func__,
+ "%s failed submit ctrl_urb %d type %d\n", __func__,
result, type);
return result;
}
@@ -1888,7 +1912,7 @@ static void intr_callback(struct urb *urb)
/* status check */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, NULL);
return;
}
D4("\n--- Got intr callback 0x%02X ---", status);
@@ -1905,18 +1929,18 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
D1("Pending read interrupt on port %d\n", i);
spin_lock(&serial->serial_lock);
- if (serial->rx_state == RX_IDLE) {
+ if (serial->rx_state == RX_IDLE &&
+ serial->open_count > 0) {
/* Setup and send a ctrl req read on
* port i */
- if (!serial->rx_urb_filled[0]) {
+ if (!serial->rx_urb_filled[0]) {
serial->rx_state = RX_SENT;
hso_mux_serial_read(serial);
} else
serial->rx_state = RX_PENDING;
-
} else {
- D1("Already pending a read on "
- "port %d\n", i);
+ D1("Already a read pending on "
+ "port %d or port not open\n", i);
}
spin_unlock(&serial->serial_lock);
}
@@ -1958,7 +1982,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2014,7 +2038,7 @@ static void ctrl_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2358,12 +2382,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
@@ -2391,6 +2415,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
+ INIT_WORK(&hso_dev->reset_device, reset_device);
return hso_dev;
}
@@ -2831,13 +2856,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mux->shared_intr_urb) {
- dev_err(&interface->dev, "Could not allocate intr urb?");
+ dev_err(&interface->dev, "Could not allocate intr urb?\n");
goto exit;
}
- mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize,
- GFP_KERNEL);
+ mux->shared_intr_buf =
+ kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
+ GFP_KERNEL);
if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?");
+ dev_err(&interface->dev, "Could not allocate intr buf?\n");
goto exit;
}
@@ -3132,6 +3158,26 @@ out:
return result;
}
+static void reset_device(struct work_struct *data)
+{
+ struct hso_device *hso_dev =
+ container_of(data, struct hso_device, reset_device);
+ struct usb_device *usb = hso_dev->usb;
+ int result;
+
+ if (hso_dev->usb_gone) {
+ D1("No reset during disconnect\n");
+ } else {
+ result = usb_lock_device_for_reset(usb, hso_dev->interface);
+ if (result < 0)
+ D1("unable to lock device for reset: %d\n", result);
+ else {
+ usb_reset_device(usb);
+ usb_unlock_device(usb);
+ }
+ }
+}
+
static void hso_serial_ref_free(struct kref *ref)
{
struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3232,13 +3278,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
usb_rcvintpipe(usb,
shared_int->intr_endp->bEndpointAddress & 0x7F),
shared_int->shared_intr_buf,
- shared_int->intr_endp->wMaxPacketSize,
+ 1,
intr_callback, shared_int,
shared_int->intr_endp->bInterval);
result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
if (result)
- dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__,
+ dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__,
result);
return result;
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 55cf7081de10..be02a25da71a 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -29,6 +29,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
@@ -51,7 +52,7 @@ static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
int len;
if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
- deverr(dev, "unexpected tiny rx frame");
+ netdev_err(dev->net, "unexpected tiny rx frame\n");
return 0;
}
@@ -138,25 +139,25 @@ static void int51x1_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
/* do not expect to see traffic of other PLCs */
filter |= PACKET_TYPE_PROMISCUOUS;
- devinfo(dev, "promiscuous mode enabled");
- } else if (netdev->mc_count ||
+ netdev_info(dev->net, "promiscuous mode enabled\n");
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
filter |= PACKET_TYPE_ALL_MULTICAST;
- devdbg(dev, "receive all multicast enabled");
+ netdev_dbg(dev->net, "receive all multicast enabled\n");
} else {
/* ~PROMISCUOUS, ~MULTICAST */
- devdbg(dev, "receive own packets only");
+ netdev_dbg(dev->net, "receive own packets only\n");
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- devwarn(dev, "Error allocating URB");
+ netdev_warn(dev->net, "Error allocating URB\n");
return;
}
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
- devwarn(dev, "Error allocating control msg");
+ netdev_warn(dev->net, "Error allocating control msg\n");
goto out;
}
@@ -173,7 +174,8 @@ static void int51x1_set_multicast(struct net_device *netdev)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- devwarn(dev, "Error submitting control msg, sts=%d", status);
+ netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
+ status);
goto out1;
}
return;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644
index 000000000000..418825d26f90
--- /dev/null
+++ b/drivers/net/usb/ipheth.c
@@ -0,0 +1,569 @@
+/*
+ * ipheth.c - Apple iPhone USB Ethernet driver
+ *
+ * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of GIAGIO.COM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ *
+ * Attention: iPhone device must be paired, otherwise it won't respond to our
+ * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+
+#define USB_VENDOR_APPLE 0x05ac
+#define USB_PRODUCT_IPHONE 0x1290
+#define USB_PRODUCT_IPHONE_3G 0x1292
+#define USB_PRODUCT_IPHONE_3GS 0x1294
+
+#define IPHETH_USBINTF_CLASS 255
+#define IPHETH_USBINTF_SUBCLASS 253
+#define IPHETH_USBINTF_PROTO 1
+
+#define IPHETH_BUF_SIZE 1516
+#define IPHETH_TX_TIMEOUT (5 * HZ)
+
+#define IPHETH_INTFNUM 2
+#define IPHETH_ALT_INTFNUM 1
+
+#define IPHETH_CTRL_ENDP 0x00
+#define IPHETH_CTRL_BUF_SIZE 0x40
+#define IPHETH_CTRL_TIMEOUT (5 * HZ)
+
+#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_CARRIER_CHECK 0x45
+
+#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
+#define IPHETH_CARRIER_ON 0x04
+
+static struct usb_device_id ipheth_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ipheth_table);
+
+struct ipheth_device {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct net_device *net;
+ struct sk_buff *tx_skb;
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ unsigned char *ctrl_buf;
+ u8 bulk_in;
+ u8 bulk_out;
+ struct delayed_work carrier_work;
+};
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
+
+static int ipheth_alloc_urbs(struct ipheth_device *iphone)
+{
+ struct urb *tx_urb = NULL;
+ struct urb *rx_urb = NULL;
+ u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+
+ tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (tx_urb == NULL)
+ goto error_nomem;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (rx_urb == NULL)
+ goto free_tx_urb;
+
+ tx_buf = usb_buffer_alloc(iphone->udev,
+ IPHETH_BUF_SIZE,
+ GFP_KERNEL,
+ &tx_urb->transfer_dma);
+ if (tx_buf == NULL)
+ goto free_rx_urb;
+
+ rx_buf = usb_buffer_alloc(iphone->udev,
+ IPHETH_BUF_SIZE,
+ GFP_KERNEL,
+ &rx_urb->transfer_dma);
+ if (rx_buf == NULL)
+ goto free_tx_buf;
+
+
+ iphone->tx_urb = tx_urb;
+ iphone->rx_urb = rx_urb;
+ iphone->tx_buf = tx_buf;
+ iphone->rx_buf = rx_buf;
+ return 0;
+
+free_tx_buf:
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+ tx_urb->transfer_dma);
+free_rx_urb:
+ usb_free_urb(rx_urb);
+free_tx_urb:
+ usb_free_urb(tx_urb);
+error_nomem:
+ return -ENOMEM;
+}
+
+static void ipheth_free_urbs(struct ipheth_device *iphone)
+{
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
+ usb_free_urb(iphone->rx_urb);
+ usb_free_urb(iphone->tx_urb);
+}
+
+static void ipheth_kill_urbs(struct ipheth_device *dev)
+{
+ usb_kill_urb(dev->tx_urb);
+ usb_kill_urb(dev->rx_urb);
+}
+
+static void ipheth_rcvbulk_callback(struct urb *urb)
+{
+ struct ipheth_device *dev;
+ struct sk_buff *skb;
+ int status;
+ char *buf;
+ int len;
+
+ dev = urb->context;
+ if (dev == NULL)
+ return;
+
+ status = urb->status;
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ return;
+ case 0:
+ break;
+ default:
+ err("%s: urb status: %d", __func__, urb->status);
+ return;
+ }
+
+ len = urb->actual_length;
+ buf = urb->transfer_buffer;
+
+ skb = dev_alloc_skb(NET_IP_ALIGN + len);
+ if (!skb) {
+ err("%s: dev_alloc_skb: -ENOMEM", __func__);
+ dev->net->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
+ skb->dev = dev->net;
+ skb->protocol = eth_type_trans(skb, dev->net);
+
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += len;
+
+ netif_rx(skb);
+ ipheth_rx_submit(dev, GFP_ATOMIC);
+}
+
+static void ipheth_sndbulk_callback(struct urb *urb)
+{
+ struct ipheth_device *dev;
+
+ dev = urb->context;
+ if (dev == NULL)
+ return;
+
+ if (urb->status != 0 &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN)
+ err("%s: urb status: %d", __func__, urb->status);
+
+ dev_kfree_skb_irq(dev->tx_skb);
+ netif_wake_queue(dev->net);
+}
+
+static int ipheth_carrier_set(struct ipheth_device *dev)
+{
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ retval = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ IPHETH_CMD_CARRIER_CHECK, /* request */
+ 0xc0, /* request type */
+ 0x00, /* value */
+ 0x02, /* index */
+ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+ if (retval < 0) {
+ err("%s: usb_control_msg: %d", __func__, retval);
+ return retval;
+ }
+
+ if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+ netif_carrier_on(dev->net);
+ else
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static void ipheth_carrier_check_work(struct work_struct *work)
+{
+ struct ipheth_device *dev = container_of(work, struct ipheth_device,
+ carrier_work.work);
+
+ ipheth_carrier_set(dev);
+ schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+}
+
+static int ipheth_get_macaddr(struct ipheth_device *dev)
+{
+ struct usb_device *udev = dev->udev;
+ struct net_device *net = dev->net;
+ int retval;
+
+ retval = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ IPHETH_CMD_GET_MACADDR, /* request */
+ 0xc0, /* request type */
+ 0x00, /* value */
+ 0x02, /* index */
+ dev->ctrl_buf,
+ IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+ if (retval < 0) {
+ err("%s: usb_control_msg: %d", __func__, retval);
+ } else if (retval < ETH_ALEN) {
+ err("%s: usb_control_msg: short packet: %d bytes",
+ __func__, retval);
+ retval = -EINVAL;
+ } else {
+ memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
+{
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ usb_fill_bulk_urb(dev->rx_urb, udev,
+ usb_rcvbulkpipe(udev, dev->bulk_in),
+ dev->rx_buf, IPHETH_BUF_SIZE,
+ ipheth_rcvbulk_callback,
+ dev);
+ dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ retval = usb_submit_urb(dev->rx_urb, mem_flags);
+ if (retval)
+ err("%s: usb_submit_urb: %d", __func__, retval);
+ return retval;
+}
+
+static int ipheth_open(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ struct usb_device *udev = dev->udev;
+ int retval = 0;
+
+ usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
+
+ retval = ipheth_carrier_set(dev);
+ if (retval)
+ return retval;
+
+ retval = ipheth_rx_submit(dev, GFP_KERNEL);
+ if (retval)
+ return retval;
+
+ schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+ netif_start_queue(net);
+ return retval;
+}
+
+static int ipheth_close(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+
+ cancel_delayed_work_sync(&dev->carrier_work);
+ netif_stop_queue(net);
+ return 0;
+}
+
+static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ struct usb_device *udev = dev->udev;
+ int retval;
+
+ /* Paranoid */
+ if (skb->len > IPHETH_BUF_SIZE) {
+ WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+ dev->net->stats.tx_dropped++;
+ dev_kfree_skb_irq(skb);
+ return NETDEV_TX_OK;
+ }
+
+ memcpy(dev->tx_buf, skb->data, skb->len);
+ if (skb->len < IPHETH_BUF_SIZE)
+ memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
+
+ usb_fill_bulk_urb(dev->tx_urb, udev,
+ usb_sndbulkpipe(udev, dev->bulk_out),
+ dev->tx_buf, IPHETH_BUF_SIZE,
+ ipheth_sndbulk_callback,
+ dev);
+ dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
+ if (retval) {
+ err("%s: usb_submit_urb: %d", __func__, retval);
+ dev->net->stats.tx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ dev->tx_skb = skb;
+
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += skb->len;
+ netif_stop_queue(net);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void ipheth_tx_timeout(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+
+ err("%s: TX timeout", __func__);
+ dev->net->stats.tx_errors++;
+ usb_unlink_urb(dev->tx_urb);
+}
+
+static struct net_device_stats *ipheth_stats(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ return &dev->net->stats;
+}
+
+static u32 ipheth_ethtool_op_get_link(struct net_device *net)
+{
+ struct ipheth_device *dev = netdev_priv(net);
+ return netif_carrier_ok(dev->net);
+}
+
+static struct ethtool_ops ops = {
+ .get_link = ipheth_ethtool_op_get_link
+};
+
+static const struct net_device_ops ipheth_netdev_ops = {
+ .ndo_open = &ipheth_open,
+ .ndo_stop = &ipheth_close,
+ .ndo_start_xmit = &ipheth_tx,
+ .ndo_tx_timeout = &ipheth_tx_timeout,
+ .ndo_get_stats = &ipheth_stats,
+};
+
+static struct device_type ipheth_type = {
+ .name = "wwan",
+};
+
+static int ipheth_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_host_interface *hintf;
+ struct usb_endpoint_descriptor *endp;
+ struct ipheth_device *dev;
+ struct net_device *netdev;
+ int i;
+ int retval;
+
+ netdev = alloc_etherdev(sizeof(struct ipheth_device));
+ if (!netdev)
+ return -ENOMEM;
+
+ netdev->netdev_ops = &ipheth_netdev_ops;
+ netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
+ strcpy(netdev->name, "wwan%d");
+
+ dev = netdev_priv(netdev);
+ dev->udev = udev;
+ dev->net = netdev;
+ dev->intf = intf;
+
+ /* Set up endpoints */
+ hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
+ if (hintf == NULL) {
+ retval = -ENODEV;
+ err("Unable to find alternate settings interface");
+ goto err_endpoints;
+ }
+
+ for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
+ endp = &hintf->endpoint[i].desc;
+ if (usb_endpoint_is_bulk_in(endp))
+ dev->bulk_in = endp->bEndpointAddress;
+ else if (usb_endpoint_is_bulk_out(endp))
+ dev->bulk_out = endp->bEndpointAddress;
+ }
+ if (!(dev->bulk_in && dev->bulk_out)) {
+ retval = -ENODEV;
+ err("Unable to find endpoints");
+ goto err_endpoints;
+ }
+
+ dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
+ if (dev->ctrl_buf == NULL) {
+ retval = -ENOMEM;
+ goto err_alloc_ctrl_buf;
+ }
+
+ retval = ipheth_get_macaddr(dev);
+ if (retval)
+ goto err_get_macaddr;
+
+ INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
+
+ retval = ipheth_alloc_urbs(dev);
+ if (retval) {
+ err("error allocating urbs: %d", retval);
+ goto err_alloc_urbs;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ SET_ETHTOOL_OPS(netdev, &ops);
+ SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
+
+ retval = register_netdev(netdev);
+ if (retval) {
+ err("error registering netdev: %d", retval);
+ retval = -EIO;
+ goto err_register_netdev;
+ }
+
+ dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
+ return 0;
+
+err_register_netdev:
+ ipheth_free_urbs(dev);
+err_alloc_urbs:
+err_get_macaddr:
+err_alloc_ctrl_buf:
+ kfree(dev->ctrl_buf);
+err_endpoints:
+ free_netdev(netdev);
+ return retval;
+}
+
+static void ipheth_disconnect(struct usb_interface *intf)
+{
+ struct ipheth_device *dev;
+
+ dev = usb_get_intfdata(intf);
+ if (dev != NULL) {
+ unregister_netdev(dev->net);
+ ipheth_kill_urbs(dev);
+ ipheth_free_urbs(dev);
+ kfree(dev->ctrl_buf);
+ free_netdev(dev->net);
+ }
+ usb_set_intfdata(intf, NULL);
+ dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
+}
+
+static struct usb_driver ipheth_driver = {
+ .name = "ipheth",
+ .probe = ipheth_probe,
+ .disconnect = ipheth_disconnect,
+ .id_table = ipheth_table,
+};
+
+static int __init ipheth_init(void)
+{
+ int retval;
+
+ retval = usb_register(&ipheth_driver);
+ if (retval) {
+ err("usb_register failed: %d", retval);
+ return retval;
+ }
+ return 0;
+}
+
+static void __exit ipheth_exit(void)
+{
+ usb_deregister(&ipheth_driver);
+}
+
+module_init(ipheth_init);
+module_exit(ipheth_exit);
+
+MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
+MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1d64ef67efa..c4c334d9770f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
+ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
@@ -881,7 +882,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
- else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
+ else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f480..9f24e3f871e1 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
/*
- * MosChips MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
+ * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (c) 2002-2003 TiVo Inc.
*
+ * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
+ *
+ * TODO:
+ * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
+ * - implement ethtool_ops get_pauseparam/set_pauseparam
+ * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
+ * - implement get_eeprom/[set_eeprom]
+ * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
+ * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
+ * can access only ~ 24, remaining user buffer is uninitialized garbage
+ * - anything else?
+ *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -30,6 +44,7 @@
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
@@ -55,7 +70,7 @@
ADVERTISE_100HALF | ADVERTISE_10FULL | \
ADVERTISE_10HALF | ADVERTISE_CSMA)
-/* HIF_REG_XX coressponding index value */
+/* HIF_REG_XX corresponding index value */
enum {
HIF_REG_MULTICAST_HASH = 0x00,
HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +84,7 @@ enum {
HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
HIF_REG_CONFIG = 0x0e,
+ /* hmm, spec says: "R/W", "Except bit 3" (likely TXENABLE). */
HIF_REG_CONFIG_CFG = 0x80,
HIF_REG_CONFIG_SPEED100 = 0x40,
HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +92,24 @@ enum {
HIF_REG_CONFIG_TXENABLE = 0x08,
HIF_REG_CONFIG_SLEEPMODE = 0x04,
HIF_REG_CONFIG_ALLMULTICAST = 0x02,
- HIF_REG_CONFIG_PROMISCIOUS = 0x01,
+ HIF_REG_CONFIG_PROMISCUOUS = 0x01,
HIF_REG_ETHERNET_ADDR = 0x0f,
- HIF_REG_22 = 0x15,
+ HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
HIF_REG_PAUSE_THRESHOLD = 0x16,
HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
};
+/* Trailing status byte in Ethernet Rx frame */
+enum {
+ MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
+ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
+ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
+ MCS7830_RX_CRC_ERROR = 0x08,
+ MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
+ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
+ /* [7:6] reserved */
+};
+
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
@@ -109,7 +136,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
return ret;
}
-static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
struct usb_device *xdev = dev->udev;
int ret;
@@ -183,13 +210,43 @@ out:
usb_free_urb(urb);
}
-static int mcs7830_get_address(struct usbnet *dev)
+static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- dev->net->dev_addr);
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
+
if (ret < 0)
return ret;
+
+ /* it worked --> adopt it on netdev side */
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
return 0;
}
@@ -307,7 +364,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
{
u8 dummy[2];
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy);
+ ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
if (ret > 0)
return 2; /* Rev C or later */
return 1; /* earlier revision */
@@ -331,33 +388,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
}
}
-static int mcs7830_init_dev(struct usbnet *dev)
-{
- int ret;
- int retry;
-
- /* Read MAC address from EEPROM */
- ret = -EINVAL;
- for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_get_address(dev);
- if (ret) {
- dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
- goto out;
- }
-
- /* Set up PHY */
- ret = mcs7830_set_autoneg(dev, 0);
- if (ret) {
- dev_info(&dev->udev->dev, "Cannot set autoneg\n");
- goto out;
- }
-
- mcs7830_rev_C_fixup(dev);
- ret = 0;
-out:
- return ret;
-}
-
static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
int location)
{
@@ -378,11 +408,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-/* credits go to asix_set_multicast */
-static void mcs7830_set_multicast(struct net_device *net)
+static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
+{
+ return (struct mcs7830_data *)&dev->data;
+}
+
+static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
+{
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
+ sizeof data->multi_filter,
+ data->multi_filter);
+}
+
+static void mcs7830_hif_update_config(struct usbnet *dev)
+{
+ /* implementation specific to data->config
+ (argument needs to be heap-based anyway - USB DMA!) */
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+}
+
+static void mcs7830_data_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct mcs7830_data *data = (struct mcs7830_data *)&dev->data;
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ memset(data->multi_filter, 0, sizeof data->multi_filter);
data->config = HIF_REG_CONFIG_TXENABLE;
@@ -390,36 +442,64 @@ static void mcs7830_set_multicast(struct net_device *net)
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
if (net->flags & IFF_PROMISC) {
- data->config |= HIF_REG_CONFIG_PROMISCIOUS;
+ data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > MCS7830_MAX_MCAST) {
+ netdev_mc_count(net) > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
-
- memset(data->multi_filter, 0, sizeof data->multi_filter);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
+ }
+}
- mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
- sizeof data->multi_filter,
- data->multi_filter);
+static int mcs7830_apply_base_config(struct usbnet *dev)
+{
+ int ret;
+
+ /* re-configure known MAC (suspend case etc.) */
+ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set MAC address\n");
+ goto out;
}
- mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+ /* Set up PHY */
+ ret = mcs7830_set_autoneg(dev, 0);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set autoneg\n");
+ goto out;
+ }
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
+
+ mcs7830_rev_C_fixup(dev);
+ ret = 0;
+out:
+ return ret;
+}
+
+/* credits go to asix_set_multicast */
+static void mcs7830_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ mcs7830_data_set_multicast(net);
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
}
static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +543,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
-{
- int ret;
- struct usbnet *dev = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- netdev->dev_addr);
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
@@ -495,21 +552,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_multicast_list = mcs7830_set_multicast,
- .ndo_set_mac_address = mcs7830_set_mac_address,
+ .ndo_set_mac_address = mcs7830_set_mac_address,
};
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
int ret;
+ int retry;
- ret = mcs7830_init_dev(dev);
+ /* Initial startup: Gather MAC address setting from EEPROM */
+ ret = -EINVAL;
+ for (retry = 0; retry < 5 && ret; retry++)
+ ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ if (ret) {
+ dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
+ goto out;
+ }
+
+ mcs7830_data_set_multicast(net);
+
+ ret = mcs7830_apply_base_config(dev);
if (ret)
goto out;
net->ethtool_ops = &mcs7830_ethtool_ops;
net->netdev_ops = &mcs7830_netdev_ops;
- mcs7830_set_multicast(net);
/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +594,7 @@ out:
return ret;
}
-/* The chip always appends a status bytes that we need to strip */
+/* The chip always appends a status byte that we need to strip */
static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
@@ -539,9 +607,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_trim(skb, skb->len - 1);
status = skb->data[skb->len];
- if (status != 0x20)
+ if (status != MCS7830_RX_FRAME_CORRECT) {
dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
+ /* hmm, perhaps usbnet.c already sees a globally visible
+ frame error and increments rx_errors on its own? */
+ dev->net->stats.rx_errors++;
+
+ if (status & (MCS7830_RX_SHORT_FRAME
+ |MCS7830_RX_LENGTH_ERROR
+ |MCS7830_RX_LARGE_FRAME))
+ dev->net->stats.rx_length_errors++;
+ if (status & MCS7830_RX_ALIGNMENT_ERROR)
+ dev->net->stats.rx_frame_errors++;
+ if (status & MCS7830_RX_CRC_ERROR)
+ dev->net->stats.rx_crc_errors++;
+ }
+
return skb->len > 0;
}
@@ -580,6 +662,20 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
+static int mcs7830_reset_resume (struct usb_interface *intf)
+{
+ /* YES, this function is successful enough that ethtool -d
+ does show the same output pre- and post-suspend */
+
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ mcs7830_apply_base_config(dev);
+
+ usbnet_resume(intf);
+
+ return 0;
+}
+
static struct usb_driver mcs7830_driver = {
.name = driver_name,
.id_table = products,
@@ -587,6 +683,7 @@ static struct usb_driver mcs7830_driver = {
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = mcs7830_reset_resume,
};
static int __init mcs7830_init(void)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index aeb1ab03a9ee..961a8ed38d8f 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -29,6 +29,7 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -203,25 +204,23 @@ static void nc_dump_registers(struct usbnet *dev)
static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
{
- if (!netif_msg_link(dev))
- return;
- devdbg(dev, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s;"
- " this%s%s;"
- " other%s%s; r/o 0x%x",
- dev->udev->bus->bus_name, dev->udev->devpath,
- usbctl,
- (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
- (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
- (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
- (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
- (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
-
- (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
- (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
- (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
- (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
- usbctl & ~USBCTL_WRITABLE_MASK
- );
+ netif_dbg(dev, link, dev->net,
+ "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s; this%s%s; other%s%s; r/o 0x%x\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ usbctl,
+ (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
+ (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
+ (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
+ (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
+ (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
+
+ (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
+ (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
+
+ (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
+ (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
+
+ usbctl & ~USBCTL_WRITABLE_MASK);
}
/*-------------------------------------------------------------------------*/
@@ -248,30 +247,26 @@ static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
static inline void nc_dump_status(struct usbnet *dev, u16 status)
{
- if (!netif_msg_link(dev))
- return;
- devdbg(dev, "net1080 %s-%s status 0x%x:"
- " this (%c) PKT=%d%s%s%s;"
- " other PKT=%d%s%s%s; unspec 0x%x",
- dev->udev->bus->bus_name, dev->udev->devpath,
- status,
-
- // XXX the packet counts don't seem right
- // (1 at reset, not 0); maybe UNSPEC too
-
- (status & STATUS_PORT_A) ? 'A' : 'B',
- STATUS_PACKETS_THIS(status),
- (status & STATUS_CONN_THIS) ? " CON" : "",
- (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
- (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
-
- STATUS_PACKETS_OTHER(status),
- (status & STATUS_CONN_OTHER) ? " CON" : "",
- (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
- (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
-
- status & STATUS_UNSPEC_MASK
- );
+ netif_dbg(dev, link, dev->net,
+ "net1080 %s-%s status 0x%x: this (%c) PKT=%d%s%s%s; other PKT=%d%s%s%s; unspec 0x%x\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ status,
+
+ // XXX the packet counts don't seem right
+ // (1 at reset, not 0); maybe UNSPEC too
+
+ (status & STATUS_PORT_A) ? 'A' : 'B',
+ STATUS_PACKETS_THIS(status),
+ (status & STATUS_CONN_THIS) ? " CON" : "",
+ (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
+ (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
+
+ STATUS_PACKETS_OTHER(status),
+ (status & STATUS_CONN_OTHER) ? " CON" : "",
+ (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
+ (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
+
+ status & STATUS_UNSPEC_MASK);
}
/*-------------------------------------------------------------------------*/
@@ -286,10 +281,9 @@ static inline void nc_dump_status(struct usbnet *dev, u16 status)
static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
{
- if (netif_msg_link(dev))
- devdbg(dev, "net1080 %s-%s ttl 0x%x this = %d, other = %d",
- dev->udev->bus->bus_name, dev->udev->devpath,
- ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
+ netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
}
/*-------------------------------------------------------------------------*/
@@ -334,11 +328,9 @@ static int net1080_reset(struct usbnet *dev)
MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
- if (netif_msg_link(dev))
- devinfo(dev, "port %c, peer %sconnected",
- (status & STATUS_PORT_A) ? 'A' : 'B',
- (status & STATUS_CONN_OTHER) ? "" : "dis"
- );
+ netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
+ (status & STATUS_PORT_A) ? 'A' : 'B',
+ (status & STATUS_CONN_OTHER) ? "" : "dis");
retval = 0;
done:
@@ -415,8 +407,8 @@ static void nc_ensure_sync(struct usbnet *dev)
return;
}
- if (netif_msg_rx_err(dev))
- devdbg(dev, "flush net1080; too many framing errors");
+ netif_dbg(dev, rx_err, dev->net,
+ "flush net1080; too many framing errors\n");
dev->frame_errors = 0;
}
}
@@ -486,8 +478,8 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
#if 0
- devdbg(dev, "frame <rx h %d p %d id %d", header->hdr_len,
- header->packet_len, header->packet_id);
+ netdev_dbg(dev->net, "frame <rx h %d p %d id %d\n", header->hdr_len,
+ header->packet_len, header->packet_id);
#endif
dev->frame_errors = 0;
return 1;
@@ -547,9 +539,9 @@ encapsulate:
trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
put_unaligned(header->packet_id, &trailer->packet_id);
#if 0
- devdbg(dev, "frame >tx h %d p %d id %d",
- header->hdr_len, header->packet_len,
- header->packet_id);
+ netdev_dbg(dev->net, "frame >tx h %d p %d id %d\n",
+ header->hdr_len, header->packet_len,
+ header->packet_id);
#endif
return skb;
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ed4a508ef262..41838773b568 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -132,9 +132,10 @@ static void ctrl_callback(struct urb *urb)
case -ENOENT:
break;
default:
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
- __func__, status);
+ if (net_ratelimit())
+ netif_dbg(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, status);
+ break;
}
pegasus->flags &= ~ETH_REGS_CHANGED;
wake_up(&pegasus->ctrl_wait);
@@ -149,9 +150,8 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -181,9 +181,9 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
set_current_state(TASK_RUNNING);
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ if (net_ratelimit())
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -205,9 +205,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
memcpy(buffer, data, size);
@@ -237,9 +236,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus))
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -259,9 +257,8 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
tmp = kmalloc(1, GFP_KERNEL);
if (!tmp) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
memcpy(tmp, &data, 1);
@@ -290,9 +287,9 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ if (net_ratelimit())
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -323,9 +320,8 @@ static int update_eth_regs_async(pegasus_t * pegasus)
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus))
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
}
return ret;
@@ -349,14 +345,16 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
if (data[0] & PHY_DONE)
break;
}
- if (i < REG_TIMEOUT) {
- ret = get_registers(pegasus, PhyData, 2, &regdi);
- *regd = le16_to_cpu(regdi);
- return ret;
- }
+
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ ret = get_registers(pegasus, PhyData, 2, &regdi);
+ *regd = le16_to_cpu(regdi);
+ return ret;
+
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
@@ -388,12 +386,14 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
if (data[0] & PHY_DONE)
break;
}
- if (i < REG_TIMEOUT)
- return ret;
+
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ return ret;
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
@@ -422,15 +422,15 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
if (ret == -ESHUTDOWN)
goto fail;
}
- if (i < REG_TIMEOUT) {
- ret = get_registers(pegasus, EpromData, 2, &retdatai);
- *retdata = le16_to_cpu(retdatai);
- return ret;
- }
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ ret = get_registers(pegasus, EpromData, 2, &retdatai);
+ *retdata = le16_to_cpu(retdatai);
+ return ret;
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
@@ -475,11 +475,13 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
break;
}
disable_eprom_write(pegasus);
- if (i < REG_TIMEOUT)
- return ret;
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ return ret;
+
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#endif /* PEGASUS_WRITE_EEPROM */
@@ -642,25 +644,20 @@ static void read_bulk_callback(struct urb *urb)
case 0:
break;
case -ETIME:
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: reset MAC\n", net->name);
+ netif_dbg(pegasus, rx_err, net, "reset MAC\n");
pegasus->flags &= ~PEGASUS_RX_BUSY;
break;
case -EPIPE: /* stall, or disconnect from TT */
/* FIXME schedule work to clear the halt */
- if (netif_msg_rx_err(pegasus))
- printk(KERN_WARNING "%s: no rx stall recovery\n",
- net->name);
+ netif_warn(pegasus, rx_err, net, "no rx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
- if (netif_msg_ifdown(pegasus))
- pr_debug("%s: rx unlink, %d\n", net->name, status);
+ netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status);
return;
default:
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: RX status %d\n", net->name, status);
+ netif_dbg(pegasus, rx_err, net, "RX status %d\n", status);
goto goon;
}
@@ -669,9 +666,8 @@ static void read_bulk_callback(struct urb *urb)
rx_status = buf[count - 2];
if (rx_status & 0x1e) {
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: RX packet error %x\n",
- net->name, rx_status);
+ netif_dbg(pegasus, rx_err, net,
+ "RX packet error %x\n", rx_status);
pegasus->stats.rx_errors++;
if (rx_status & 0x06) // long or runt
pegasus->stats.rx_length_errors++;
@@ -758,9 +754,7 @@ static void rx_fixup(unsigned long data)
pegasus->rx_skb = pull_skb(pegasus);
}
if (pegasus->rx_skb == NULL) {
- if (netif_msg_rx_err(pegasus))
- printk(KERN_WARNING "%s: low on memory\n",
- pegasus->net->name);
+ netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
tasklet_schedule(&pegasus->rx_tl);
goto done;
}
@@ -800,19 +794,15 @@ static void write_bulk_callback(struct urb *urb)
case -EPIPE:
/* FIXME schedule_work() to clear the tx halt */
netif_stop_queue(net);
- if (netif_msg_tx_err(pegasus))
- printk(KERN_WARNING "%s: no tx stall recovery\n",
- net->name);
+ netif_warn(pegasus, tx_err, net, "no tx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
- if (netif_msg_ifdown(pegasus))
- pr_debug("%s: tx unlink, %d\n", net->name, status);
+ netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status);
return;
default:
- if (netif_msg_tx_err(pegasus))
- pr_info("%s: TX status %d\n", net->name, status);
+ netif_info(pegasus, tx_err, net, "TX status %d\n", status);
/* FALL THROUGH */
case 0:
break;
@@ -843,9 +833,7 @@ static void intr_callback(struct urb *urb)
/* some Pegasus-I products report LOTS of data
* toggle errors... avoid log spamming
*/
- if (netif_msg_timer(pegasus))
- pr_debug("%s: intr status %d\n", net->name,
- status);
+ netif_dbg(pegasus, timer, net, "intr status %d\n", status);
}
if (urb->actual_length >= 6) {
@@ -875,16 +863,15 @@ static void intr_callback(struct urb *urb)
res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (res && netif_msg_timer(pegasus))
- printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n",
- net->name, res);
+ if (res)
+ netif_err(pegasus, timer, net,
+ "can't resubmit interrupt urb, %d\n", res);
}
static void pegasus_tx_timeout(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
- if (netif_msg_timer(pegasus))
- printk(KERN_WARNING "%s: tx timeout\n", net->name);
+ netif_warn(pegasus, timer, net, "tx timeout\n");
usb_unlink_urb(pegasus->tx_urb);
pegasus->stats.tx_errors++;
}
@@ -906,9 +893,7 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
pegasus->tx_buff, count,
write_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) {
- if (netif_msg_tx_err(pegasus))
- printk(KERN_WARNING "%s: fail tx, %d\n",
- net->name, res);
+ netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res);
switch (res) {
case -EPIPE: /* stall, or disconnect from TT */
/* cleanup should already have been scheduled */
@@ -952,10 +937,9 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
if (interval < 0x80) {
- if (netif_msg_timer(pegasus))
- dev_info(&pegasus->intf->dev, "intr interval "
- "changed from %ums to %ums\n",
- interval, 0x80);
+ netif_info(pegasus, timer, pegasus->net,
+ "intr interval changed from %ums to %ums\n",
+ interval, 0x80);
interval = 0x80;
data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
@@ -1046,8 +1030,7 @@ static int pegasus_open(struct net_device *net)
if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: failed rx_urb, %d", net->name, res);
+ netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res);
goto exit;
}
@@ -1058,15 +1041,13 @@ static int pegasus_open(struct net_device *net)
if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: failed intr_urb, %d\n", net->name, res);
+ netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res);
usb_kill_urb(pegasus->rx_urb);
goto exit;
}
if ((res = enable_net_traffic(net, pegasus->usb))) {
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: can't enable_net_traffic() - %d\n",
- net->name, res);
+ netif_dbg(pegasus, ifup, net,
+ "can't enable_net_traffic() - %d\n", res);
res = -EIO;
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
@@ -1075,8 +1056,7 @@ static int pegasus_open(struct net_device *net)
}
set_carrier(net);
netif_start_queue(net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: open\n", net->name);
+ netif_dbg(pegasus, ifup, net, "open\n");
res = 0;
exit:
return res;
@@ -1230,13 +1210,11 @@ static void pegasus_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
- if (netif_msg_link(pegasus))
- pr_info("%s: Promiscuous mode enabled.\n", net->name);
- } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
+ netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
+ } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
- if (netif_msg_link(pegasus))
- pr_debug("%s: set allmulti\n", net->name);
+ netif_dbg(pegasus, link, net, "set allmulti\n");
} else {
pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index 5d02f0200737..b90d8766ab74 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -177,7 +177,7 @@ PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
+ DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
@@ -208,6 +208,8 @@ PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
*/
PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV( "Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
+ DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
@@ -249,7 +251,7 @@ PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
+ DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 490fa8f55424..dd8a4adf48ca 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -22,6 +22,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
+#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
@@ -57,8 +58,8 @@
*/
void rndis_status(struct usbnet *dev, struct urb *urb)
{
- devdbg(dev, "rndis status urb, len %d stat %d",
- urb->actual_length, urb->status);
+ netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n",
+ urb->actual_length, urb->status);
// FIXME for keepalives, respond immediately (asynchronously)
// if not an RNDIS status, do like cdc_status(dev,urb) does
}
@@ -335,8 +336,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n");
+ netif_dbg(dev, probe, dev->net,
+ "dev->maxpacket can't be 0\n");
retval = -EINVAL;
goto fail_and_release;
}
@@ -394,17 +395,15 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
}
if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
*phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "driver requires wireless "
- "physical medium, but device is not.\n");
+ netif_dbg(dev, probe, dev->net,
+ "driver requires wireless physical medium, but device is not\n");
retval = -ENODEV;
goto halt_fail_and_release;
}
if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
*phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "driver requires non-wireless "
- "physical medium, but device is wireless.\n");
+ netif_dbg(dev, probe, dev->net,
+ "driver requires non-wireless physical medium, but device is wireless.\n");
retval = -ENODEV;
goto halt_fail_and_release;
}
@@ -497,9 +496,9 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb->len < msg_len ||
(data_offset + data_len + 8) > msg_len)) {
dev->net->stats.rx_frame_errors++;
- devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
- le32_to_cpu(hdr->msg_type),
- msg_len, data_offset, data_len, skb->len);
+ netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
+ le32_to_cpu(hdr->msg_type),
+ msg_len, data_offset, data_len, skb->len);
return 0;
}
skb_pull(skb, 8 + data_offset);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index f14d225404da..e85c89c6706d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT) {
+ if (i <= MII_TIMEOUT) {
get_registers(dev, PHYDAT, 2, data);
*reg = data[0] | (data[1] << 8);
return 0;
@@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT)
+ if (i <= MII_TIMEOUT)
return 0;
else
return 1;
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
rtl8150_t *dev = netdev_priv(netdev);
- int i;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dbg("%s: Setting MAC address to ", netdev->name);
- for (i = 0; i < 5; i++)
- dbg("%02X:", netdev->dev_addr[i]);
- dbg("%02X\n", netdev->dev_addr[i]);
+ dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
+ int i;
u8 cr;
/* Get the CR contents. */
get_registers(dev, CR, 1, &cr);
@@ -714,7 +711,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
dev->rx_creg |= cpu_to_le16(0x0001);
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
dev->rx_creg &= cpu_to_le16(0xfffe);
dev->rx_creg |= cpu_to_le16(0x0002);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 000000000000..a44f9e0ea098
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1001 @@
+/*
+ * USB-to-WWAN Driver for Sierra Wireless modems
+ *
+ * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
+ * <linux@sierrawireless.com>
+ *
+ * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
+ * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
+ *
+ * IMPORTANT DISCLAIMER: This driver is not commercially supported by
+ * Sierra Wireless. Use at your own risk.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DRIVER_VERSION "v.2.0"
+#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
+#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
+static const char driver_name[] = "sierra_net";
+
+/* if defined debug messages enabled */
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <asm/unaligned.h>
+#include <linux/usb/usbnet.h>
+
+#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
+#define SWI_GET_FW_ATTR_MASK 0x08
+
+/* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+static atomic_t iface_counter = ATOMIC_INIT(0);
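
A toy illustration (not in the patch) of the counter-based uniqueness scheme described above: the last two MAC bytes become (counter, interface number), which is why the scheme breaks past 255 interfaces. The counter value and interface number below are hypothetical, and the "..." stands for the unchanged leading bytes.

#include <stdio.h>

int main(void)
{
	unsigned counter = 0;		/* stands in for iface_counter */
	unsigned ifacenum = 7;		/* hypothetical interface number */
	int i;

	for (i = 0; i < 2; i++)		/* two devices binding */
		printf("dev%d: ...:%02x:%02x\n", i, ++counter & 0xff, ifacenum);
	return 0;
}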
+
+/*
+ * SYNC Timer Delay definition used to set the expiry time
+ */
+#define SIERRA_NET_SYNCDELAY (2*HZ)
+
+/* Max. MTU supported. The modem buffers are limited to 1500 */
+#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
+
+/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
+ * message reception ... and thus the max. received packet.
+ * (May be the cause for parse_hip returning -EINVAL)
+ */
+#define SIERRA_NET_USBCTL_BUF_LEN 1024
+
+/* list of interface numbers - used for constructing interface lists */
+struct sierra_net_iface_info {
+ const u32 infolen; /* number of interface numbers on list */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+};
+
+struct sierra_net_info_data {
+ u16 rx_urb_size;
+ struct sierra_net_iface_info whitelist;
+};
+
+/* Private data structure */
+struct sierra_net_data {
+
+ u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
+
+ u16 link_up; /* air link up or down */
+ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
+
+ u8 sync_msg[4]; /* SYNC message */
+ u8 shdwn_msg[4]; /* Shutdown message */
+
+ /* Backpointer to the container */
+ struct usbnet *usbnet;
+
+ u8 ifnum; /* interface number */
+
+/* Bit masks, must be a power of 2 */
+#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
+#define SIERRA_NET_TIMER_EXPIRY 0x02
+ unsigned long kevent_flags;
+ struct work_struct sierra_net_kevent;
+ struct timer_list sync_timer; /* For retrying SYNC sequence */
+};
+
+struct param {
+ int is_present;
+ union {
+ void *ptr;
+ u32 dword;
+ u16 word;
+ u8 byte;
+ };
+};
+
+/* HIP message type */
+#define SIERRA_NET_HIP_EXTENDEDID 0x7F
+#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
+#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
+#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
+#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
+
+#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
+#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
+
+/* 3G UMTS Link Sense Indication definitions */
+#define SIERRA_NET_HIP_LSI_UMTSID 0x78
+
+/* Reverse Channel Grant Indication HIP message */
+#define SIERRA_NET_HIP_RCGI 0x64
+
+/* LSI Protocol types */
+#define SIERRA_NET_PROTOCOL_UMTS 0x01
+/* LSI Coverage */
+#define SIERRA_NET_COVERAGE_NONE 0x00
+#define SIERRA_NET_COVERAGE_NOPACKET 0x01
+
+/* LSI Session */
+#define SIERRA_NET_SESSION_IDLE 0x00
+/* LSI Link types */
+#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+
+struct lsi_umts {
+ u8 protocol;
+ u8 unused1;
+ __be16 length;
+ /* eventually use a union for the rest - assume umts for now */
+ u8 coverage;
+ u8 unused2[41];
+ u8 session_state;
+ u8 unused3[33];
+ u8 link_type;
+ u8 pdp_addr_len; /* NW-supplied PDP address len */
+	u8 pdp_addr[16];	/* NW-supplied PDP address (bigendian) */
+ u8 unused4[23];
+ u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
+ u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
+ u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
+ u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
+ u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
+ u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
+ u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
+ u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
+ u8 unused5[4];
+ u8 gw_addr_len; /* NW-supplied GW address len */
+ u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
+ u8 reserved[8];
+} __attribute__ ((packed));
+
+#define SIERRA_NET_LSI_COMMON_LEN 4
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+
+/* Forward definitions */
+static void sierra_sync_timer(unsigned long syncdata);
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
+
+/* Our own net device operations structure */
+static const struct net_device_ops sierra_net_device_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = sierra_net_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* get private data associated with passed in usbnet device */
+static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
+{
+ return (struct sierra_net_data *)dev->data[0];
+}
+
+/* set private data associated with passed in usbnet device */
+static inline void sierra_net_set_private(struct usbnet *dev,
+ struct sierra_net_data *priv)
+{
+ dev->data[0] = (unsigned long)priv;
+}
+
+/* is packet IPv4 */
+static inline int is_ip(struct sk_buff *skb)
+{
+ return (skb->protocol == cpu_to_be16(ETH_P_IP));
+}
+
+/*
+ * check the passed-in packet and make sure that:
+ * - it is linear (no scatter/gather)
+ * - it is ethernet (mac_header properly set)
+ */
+static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
+{
+ skb_reset_mac_header(skb); /* ethernet header */
+
+ if (skb_is_nonlinear(skb)) {
+		netdev_err(dev->net, "Non-linear buffer - dropping\n");
+ return 0;
+ }
+
+ if (!pskb_may_pull(skb, ETH_HLEN))
+ return 0;
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ return 1;
+}
+
+static const u8 *save16bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->word = get_unaligned_be16(datap);
+ return datap + sizeof(p->word);
+}
+
+static const u8 *save8bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->byte = *datap;
+ return datap + sizeof(p->byte);
+}
+
+/*----------------------------------------------------------------------------*
+ * BEGIN HIP *
+ *----------------------------------------------------------------------------*/
+/* HIP header */
+#define SIERRA_NET_HIP_HDR_LEN 4
+/* Extended HIP header */
+#define SIERRA_NET_HIP_EXT_HDR_LEN 6
+
+struct hip_hdr {
+ int hdrlen;
+ struct param payload_len;
+ struct param msgid;
+ struct param msgspecific;
+ struct param extmsgid;
+};
+
+static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
+{
+ const u8 *curp = buf;
+ int padded;
+
+ if (buflen < SIERRA_NET_HIP_HDR_LEN)
+ return -EPROTO;
+
+ curp = save16bit(&hh->payload_len, curp);
+ curp = save8bit(&hh->msgid, curp);
+ curp = save8bit(&hh->msgspecific, curp);
+
+ padded = hh->msgid.byte & 0x80;
+ hh->msgid.byte &= 0x7F; /* 7 bits */
+
+ hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
+ if (hh->extmsgid.is_present) {
+ if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
+ return -EPROTO;
+
+ hh->payload_len.word &= 0x3FFF; /* 14 bits */
+
+ curp = save16bit(&hh->extmsgid, curp);
+ hh->extmsgid.word &= 0x03FF; /* 10 bits */
+
+ hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
+ } else {
+ hh->payload_len.word &= 0x07FF; /* 11 bits */
+ hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
+ }
+
+ if (padded) {
+ hh->hdrlen++;
+ hh->payload_len.word--;
+ }
+
+ /* if real packet shorter than the claimed length */
+ if (buflen < (hh->hdrlen + hh->payload_len.word))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void build_hip(u8 *buf, const u16 payloadlen,
+ struct sierra_net_data *priv)
+{
+ /* the following doesn't have the full functionality. We
+ * currently build only one kind of header, so it is faster this way
+ */
+ put_unaligned_be16(payloadlen, buf);
+ memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
+}
+/*----------------------------------------------------------------------------*
+ * END HIP *
+ *----------------------------------------------------------------------------*/
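
Not part of the patch, but as a reading aid for parse_hip() above: a minimal user-space sketch of how a plain (non-extended) 4-byte HIP header decodes. The sample bytes and the resulting values are made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical on-the-wire bytes: 42-byte payload, msgid 0x78 (LSI), ctx 5 */
	const uint8_t hdr[4] = { 0x00, 0x2A, 0x78, 0x05 };
	uint16_t payload_len = (((uint16_t)hdr[0] << 8) | hdr[1]) & 0x07FF;
	int padded = hdr[2] & 0x80;		/* top bit of the msgid byte */
	uint8_t msgid = hdr[2] & 0x7F;		/* 0x7F would mean "extended header" */
	uint8_t msgspecific = hdr[3];		/* for LSI this is the context index */

	printf("len=%u msgid=0x%02x ctx=%u padded=%d hdrlen=4\n",
	       payload_len, msgid, msgspecific, padded ? 1 : 0);
	return 0;
}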
+
+static int sierra_net_send_cmd(struct usbnet *dev,
+ u8 *cmd, int cmdlen, const char * cmd_name)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int status;
+
+ status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_CDC_SEND_ENCAPSULATED_COMMAND,
+ USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
+ priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+
+ if (status != cmdlen && status != -ENODEV)
+ netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
+
+ return status;
+}
+
+static int sierra_net_send_sync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ status = sierra_net_send_cmd(dev, priv->sync_msg,
+ sizeof(priv->sync_msg), "SYNC");
+
+ return status;
+}
+
+static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
+{
+ dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
+ priv->tx_hdr_template[0] = 0x3F;
+ priv->tx_hdr_template[1] = ctx_ix;
+ *((u16 *)&priv->tx_hdr_template[2]) =
+ cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
+}
+
+static inline int sierra_net_is_valid_addrlen(u8 len)
+{
+ return (len == sizeof(struct in_addr));
+}
+
+static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
+{
+ struct lsi_umts *lsi = (struct lsi_umts *)data;
+
+ if (datalen < sizeof(struct lsi_umts)) {
+ netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
+ __func__, datalen,
+ sizeof(struct lsi_umts));
+ return -1;
+ }
+
+ if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length),
+ (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
+ return -1;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ /* Validate the link type */
+ if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ lsi->link_type);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
+ || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
+ return 0;
+ }
+
+ /* Validate the session state */
+ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+ netdev_err(dev->net, "Session idle, 0x%02x\n",
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Set link_sense true */
+ return 1;
+}
+
+static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
+ struct hip_hdr *hh)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int link_up;
+
+ link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
+ hh->payload_len.word);
+ if (link_up < 0) {
+ netdev_err(dev->net, "Invalid LSI\n");
+ return;
+ }
+ if (link_up) {
+ sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
+ priv->link_up = 1;
+ netif_carrier_on(dev->net);
+ } else {
+ priv->link_up = 0;
+ netif_carrier_off(dev->net);
+ }
+}
+
+static void sierra_net_dosync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* tell modem we are ready */
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+
+ /* Now, start a timer and make sure we get the Restart Indication */
+ priv->sync_timer.function = sierra_sync_timer;
+ priv->sync_timer.data = (unsigned long) dev;
+ priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
+ add_timer(&priv->sync_timer);
+}
+
+static void sierra_net_kevent(struct work_struct *work)
+{
+ struct sierra_net_data *priv =
+ container_of(work, struct sierra_net_data, sierra_net_kevent);
+ struct usbnet *dev = priv->usbnet;
+ int len;
+ int err;
+ u8 *buf;
+ u8 ifnum;
+
+ if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
+
+ /* Query the modem for the LSI message */
+ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev->net,
+ "failed to allocate buf for LS msg\n");
+ return;
+ }
+ ifnum = priv->ifnum;
+ len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_CDC_GET_ENCAPSULATED_RESPONSE,
+ USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+ 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len < 0) {
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", len);
+ } else {
+ struct hip_hdr hh;
+
+ dev_dbg(&dev->udev->dev, "%s: Received status message,"
+ " %04x bytes", __func__, len);
+
+ err = parse_hip(buf, len, &hh);
+ if (err) {
+ netdev_err(dev->net, "%s: Bad packet,"
+ " parse result %d\n", __func__, err);
+ kfree(buf);
+ return;
+ }
+
+ /* Validate packet length */
+ if (len != hh.hdrlen + hh.payload_len.word) {
+ netdev_err(dev->net, "%s: Bad packet, received"
+ " %d, expected %d\n", __func__, len,
+ hh.hdrlen + hh.payload_len.word);
+ kfree(buf);
+ return;
+ }
+
+ /* Switch on received message types */
+ switch (hh.msgid.byte) {
+ case SIERRA_NET_HIP_LSI_UMTSID:
+ dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
+ hh.msgspecific.byte);
+ sierra_net_handle_lsi(dev, buf, &hh);
+ break;
+ case SIERRA_NET_HIP_RESTART_ID:
+ dev_dbg(&dev->udev->dev, "Restart reported: %d,"
+ " stopping sync timer",
+ hh.msgspecific.byte);
+ /* Got sync resp - stop timer & clear mask */
+ del_timer_sync(&priv->sync_timer);
+ clear_bit(SIERRA_NET_TIMER_EXPIRY,
+ &priv->kevent_flags);
+ break;
+ case SIERRA_NET_HIP_HSYNC_ID:
+ dev_dbg(&dev->udev->dev, "SYNC received");
+ err = sierra_net_send_sync(dev);
+ if (err < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed %d\n", err);
+ break;
+ case SIERRA_NET_HIP_EXTENDEDID:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "extmsgid 0x%04x\n", hh.extmsgid.word);
+ break;
+ case SIERRA_NET_HIP_RCGI:
+ /* Ignored */
+ break;
+ default:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "msgid 0x%02x\n", hh.msgid.byte);
+ break;
+ }
+ }
+ kfree(buf);
+ }
+ /* The sync timer bit might be set */
+ if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
+ dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
+ sierra_net_dosync(priv->usbnet);
+ }
+
+ if (priv->kevent_flags)
+ dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
+ "kevent_flags = 0x%lx", priv->kevent_flags);
+}
+
+static void sierra_net_defer_kevent(struct usbnet *dev, int work)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ set_bit(work, &priv->kevent_flags);
+ schedule_work(&priv->sierra_net_kevent);
+}
+
+/*
+ * Sync Retransmit Timer Handler. On expiry, kick the work queue
+ */
+static void sierra_sync_timer(unsigned long syncdata)
+{
+ struct usbnet *dev = (struct usbnet *)syncdata;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+	/* Kick the work queue */
+ sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
+}
+
+static void sierra_net_status(struct usbnet *dev, struct urb *urb)
+{
+ struct usb_cdc_notification *event;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ if (urb->actual_length < sizeof *event)
+ return;
+
+ /* Add cases to handle other standard notifications. */
+ event = urb->transfer_buffer;
+ switch (event->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ /* USB 305 sends those */
+ break;
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
+ break;
+ default:
+		netdev_err(dev->net, "unexpected notification %02x!\n",
+ event->bNotificationType);
+ break;
+ }
+}
+
+static void sierra_net_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, driver_name, sizeof info->driver);
+ strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+}
+
+static u32 sierra_net_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ /* Report link is down whenever the interface is down */
+ return sierra_net_get_private(dev)->link_up && netif_running(net);
+}
+
+static struct ethtool_ops sierra_net_ethtool_ops = {
+ .get_drvinfo = sierra_net_get_drvinfo,
+ .get_link = sierra_net_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+/* MTU cannot be more than 1500 bytes, enforce it. */
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
+ return -EINVAL;
+
+ return usbnet_change_mtu(net, new_mtu);
+}
+
+static int is_whitelisted(const u8 ifnum,
+ const struct sierra_net_iface_info *whitelist)
+{
+ if (whitelist) {
+ const u8 *list = whitelist->ifaceinfo;
+ int i;
+
+ for (i = 0; i < whitelist->infolen; i++) {
+ if (list[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+{
+ int result = 0;
+ u16 *attrdata;
+
+ attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
+ if (!attrdata)
+ return -ENOMEM;
+
+ result = usb_control_msg(
+ dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ /* _u8 vendor specific request */
+ SWI_USB_REQUEST_GET_FW_ATTR,
+ USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
+ 0x0000, /* __u16 value not used */
+ 0x0000, /* __u16 index not used */
+ attrdata, /* char *data */
+ sizeof(*attrdata), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ if (result < 0) {
+ kfree(attrdata);
+ return -EIO;
+ }
+
+ *datap = *attrdata;
+
+ kfree(attrdata);
+ return result;
+}
+
+/*
+ * Collects the bulk endpoints and the status endpoint.
+ */
+static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u8 ifacenum;
+ u8 numendpoints;
+ u16 fwattr = 0;
+ int status;
+ struct ethhdr *eth;
+ struct sierra_net_data *priv;
+ static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+ static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+
+ struct sierra_net_info_data *data =
+ (struct sierra_net_info_data *)dev->driver_info->data;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
+ /* We only accept certain interfaces */
+ if (!is_whitelisted(ifacenum, &data->whitelist)) {
+ dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
+ return -ENODEV;
+ }
+ numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
+ /* We have three endpoints, bulk in and out, and a status */
+ if (numendpoints != 3) {
+ dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
+ numendpoints);
+ return -ENODEV;
+ }
+ /* Status endpoint set in usbnet_get_endpoints() */
+ dev->status = NULL;
+ status = usbnet_get_endpoints(dev, intf);
+ if (status < 0) {
+ dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
+ status);
+ return -ENODEV;
+ }
+ /* Initialize sierra private data */
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->udev->dev, "No memory");
+ return -ENOMEM;
+ }
+
+ priv->usbnet = dev;
+ priv->ifnum = ifacenum;
+ dev->net->netdev_ops = &sierra_net_device_ops;
+
+	/* change MAC addr to include ifacenum, and to be unique */
+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+ /* we will have to manufacture ethernet headers, prepare template */
+ eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
+ memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ /* prepare shutdown message template */
+ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+ /* set context index initially to 0 - prepares tx hdr template */
+ sierra_net_set_ctx_index(priv, 0);
+
+ /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
+ dev->rx_urb_size = data->rx_urb_size;
+ if (dev->udev->speed != USB_SPEED_HIGH)
+ dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
+
+ dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ /* Set up the netdev */
+ dev->net->flags |= IFF_NOARP;
+ dev->net->ethtool_ops = &sierra_net_ethtool_ops;
+ netif_carrier_off(dev->net);
+
+ sierra_net_set_private(dev, priv);
+
+ priv->kevent_flags = 0;
+
+ /* Use the shared workqueue */
+ INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
+
+ /* Only need to do this once */
+ init_timer(&priv->sync_timer);
+
+ /* verify fw attributes */
+ status = sierra_net_get_fw_attr(dev, &fwattr);
+ dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
+
+ /* test whether firmware supports DHCP */
+ if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
+ /* found incompatible firmware version */
+ dev_err(&dev->udev->dev, "Incompatible driver and firmware"
+ " versions\n");
+ kfree(priv);
+ return -ENODEV;
+ }
+ /* prepare sync message from template */
+ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
+ return 0;
+}
+
+static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* Kill the timer then flush the work queue */
+ del_timer_sync(&priv->sync_timer);
+
+ flush_scheduled_work();
+
+ /* tell modem we are going away */
+ status = sierra_net_send_cmd(dev, priv->shdwn_msg,
+ sizeof(priv->shdwn_msg), "Shutdown");
+ if (status < 0)
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", status);
+
+ sierra_net_set_private(dev, NULL);
+
+ kfree(priv);
+}
+
+static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
+ struct sk_buff *skb, int len)
+{
+ struct sk_buff *new_skb;
+
+ /* clone skb */
+ new_skb = skb_clone(skb, GFP_ATOMIC);
+
+ /* remove len bytes from original */
+ skb_pull(skb, len);
+
+	/* trim next packet to its length */
+ if (new_skb) {
+ skb_trim(new_skb, len);
+ } else {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "failed to get skb\n");
+ dev->net->stats.rx_dropped++;
+ }
+
+ return new_skb;
+}
+
+/* ---------------------------- Receive data path ----------------------*/
+static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int err;
+ struct hip_hdr hh;
+ struct sk_buff *new_skb;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* could contain multiple packets */
+ while (likely(skb->len)) {
+ err = parse_hip(skb->data, skb->len, &hh);
+ if (err) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "Invalid HIP header %d\n",
+ err);
+ /* dev->net->stats.rx_errors incremented by caller */
+ dev->net->stats.rx_length_errors++;
+ return 0;
+ }
+
+ /* Validate Extended HIP header */
+ if (!hh.extmsgid.is_present
+ || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
+
+ dev->net->stats.rx_frame_errors++;
+			/* dev->net->stats.rx_errors incremented by caller */
+ return 0;
+ }
+
+ skb_pull(skb, hh.hdrlen);
+
+ /* We are going to accept this packet, prepare it */
+ memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
+ ETH_HLEN);
+
+ /* Last packet in batch handled by usbnet */
+ if (hh.payload_len.word == skb->len)
+ return 1;
+
+ new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
+ if (new_skb)
+ usbnet_skb_return(dev, new_skb);
+
+ } /* while */
+
+ return 0;
+}
+
+/* ---------------------------- Transmit data path ----------------------*/
+struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ u16 len;
+ bool need_tail;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+ if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
+ /* enough head room as is? */
+ if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
+ /* Save the Eth/IP length and set up HIP hdr */
+ len = skb->len;
+ skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
+ /* Handle ZLP issue */
+ need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
+ % dev->maxpacket == 0);
+ if (need_tail) {
+ if (unlikely(skb_tailroom(skb) == 0)) {
+				netdev_err(dev->net, "tx_fixup: "
+					"no room for packet\n");
+ dev_kfree_skb_any(skb);
+ return NULL;
+ } else {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ len = len + 1;
+ }
+ }
+ build_hip(skb->data, len, priv);
+ return skb;
+ } else {
+ /*
+ * compensate in the future if necessary
+ */
+ netdev_err(dev->net, "tx_fixup: no room for HIP\n");
+ } /* headroom */
+ }
+
+ if (!priv->link_up)
+ dev->net->stats.tx_carrier_errors++;
+
+ /* tx_dropped incremented by usbnet */
+
+ /* filter the packet out, release it */
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
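
An illustration (not part of the patch) of the zero-length-packet handling in sierra_net_tx_fixup() above: when the 6-byte HIP header plus the frame is an exact multiple of the endpoint's packet size, one pad byte is appended so the transfer does not have to be terminated by a ZLP. The maxpacket and frame length below are hypothetical.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned maxpacket = 512;	/* hypothetical bulk-out packet size */
	unsigned len = 506;		/* hypothetical Ethernet/IP frame length */
	/* 6 == SIERRA_NET_HIP_EXT_HDR_LEN pushed in front of the frame */
	bool need_tail = ((len + 6) % maxpacket) == 0;

	printf("need_tail=%d, wire length=%u\n",
	       need_tail, len + 6 + (need_tail ? 1 : 0));
	return 0;
}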
+
+static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+ .rx_urb_size = 8 * 1024,
+ .whitelist = {
+ .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+ .ifaceinfo = sierra_net_ifnum_list
+ }
+};
+
+static const struct driver_info sierra_net_info_68A3 = {
+ .description = "Sierra Wireless USB-to-WWAN Modem",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = sierra_net_bind,
+ .unbind = sierra_net_unbind,
+ .status = sierra_net_status,
+ .rx_fixup = sierra_net_rx_fixup,
+ .tx_fixup = sierra_net_tx_fixup,
+ .data = (unsigned long)&sierra_net_info_data_68A3,
+};
+
+static const struct usb_device_id products[] = {
+ {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+ .driver_info = (unsigned long) &sierra_net_info_68A3},
+
+ {}, /* last item */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+/* We are based on usbnet, so let it handle the USB driver specifics */
+static struct usb_driver sierra_net_driver = {
+ .name = "sierra_net",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .no_dynamic_id = 1,
+};
+
+static int __init sierra_net_init(void)
+{
+ BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ < sizeof(struct cdc_state));
+
+ return usb_register(&sierra_net_driver);
+}
+
+static void __exit sierra_net_exit(void)
+{
+ usb_deregister(&sierra_net_driver);
+}
+
+module_exit(sierra_net_exit);
+module_init(sierra_net_init);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
new file mode 100644
index 000000000000..35b98b1b79e4
--- /dev/null
+++ b/drivers/net/usb/smsc75xx.c
@@ -0,0 +1,1289 @@
+ /***************************************************************************
+ *
+ * Copyright (C) 2007-2010 SMSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include "smsc75xx.h"
+
+#define SMSC_CHIPNAME "smsc75xx"
+#define SMSC_DRIVER_VERSION "1.0.0"
+#define HS_USB_PKT_SIZE (512)
+#define FS_USB_PKT_SIZE (64)
+#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
+#define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE)
+#define DEFAULT_BULK_IN_DELAY (0x00002000)
+#define MAX_SINGLE_PACKET_SIZE (9000)
+#define LAN75XX_EEPROM_MAGIC (0x7500)
+#define EEPROM_MAC_OFFSET (0x01)
+#define DEFAULT_TX_CSUM_ENABLE (true)
+#define DEFAULT_RX_CSUM_ENABLE (true)
+#define DEFAULT_TSO_ENABLE (true)
+#define SMSC75XX_INTERNAL_PHY_ID (1)
+#define SMSC75XX_TX_OVERHEAD (8)
+#define MAX_RX_FIFO_SIZE (20 * 1024)
+#define MAX_TX_FIFO_SIZE (12 * 1024)
+#define USB_VENDOR_ID_SMSC (0x0424)
+#define USB_PRODUCT_ID_LAN7500 (0x7500)
+#define USB_PRODUCT_ID_LAN7505 (0x7505)
+
+#define check_warn(ret, fmt, args...) \
+ ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
+
+#define check_warn_return(ret, fmt, args...) \
+ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
+
+#define check_warn_goto_done(ret, fmt, args...) \
+ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
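
For readers not used to this statement-expression style, here is a self-contained sketch (not part of the patch) of how check_warn_return() behaves: on a negative status it logs and returns from the enclosing function. The warn() helper below is only a stand-in for netdev_warn().

#include <stdio.h>

#define warn(fmt, args...)	fprintf(stderr, fmt "\n", ##args)

#define check_warn_return(ret, fmt, args...) \
	({ if (ret < 0) { warn(fmt, ##args); return ret; } })

static int demo(int status)
{
	check_warn_return(status, "register access failed: %d", status);
	return 0;	/* only reached when status >= 0 */
}

int main(void)
{
	printf("demo(3) = %d, demo(-5) = %d\n", demo(3), demo(-5));
	return 0;
}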
+
+struct smsc75xx_priv {
+ struct usbnet *dev;
+ u32 rfe_ctl;
+ u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
+ bool use_rx_csum;
+ struct mutex dataport_mutex;
+ spinlock_t rfe_ctl_lock;
+ struct work_struct set_multicast;
+};
+
+struct usb_context {
+ struct usb_ctrlrequest req;
+ struct usbnet *dev;
+};
+
+static int turbo_mode = true;
+module_param(turbo_mode, bool, 0644);
+MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+
+static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ u32 *buf = kmalloc(4, GFP_KERNEL);
+ int ret;
+
+ BUG_ON(!dev);
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_READ_REGISTER,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to read register index 0x%08x", index);
+
+ le32_to_cpus(buf);
+ *data = *buf;
+ kfree(buf);
+
+ return ret;
+}
+
+static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ u32 *buf = kmalloc(4, GFP_KERNEL);
+ int ret;
+
+ BUG_ON(!dev);
+
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = data;
+ cpu_to_le32s(buf);
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to write register index 0x%08x", index);
+
+ kfree(buf);
+
+ return ret;
+}
+
+/* Loop until the read is completed with timeout
+ * called with phy_mutex held */
+static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
+ check_warn_return(ret, "Error reading MII_ACCESS");
+
+ if (!(val & MII_ACCESS_BUSY))
+ return 0;
+ } while (!time_after(jiffies, start_time + HZ));
+
+ return -EIO;
+}
+
+static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = smsc75xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read");
+
+ /* set the address, index & direction (read from PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
+ | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
+ | MII_ACCESS_READ;
+ ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
+ check_warn_goto_done(ret, "Error writing MII_ACCESS");
+
+ ret = smsc75xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
+
+ ret = smsc75xx_read_reg(dev, MII_DATA, &val);
+ check_warn_goto_done(ret, "Error reading MII_DATA");
+
+ ret = (u16)(val & 0xFFFF);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+}
+
+static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+ int regval)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = smsc75xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write");
+
+ val = regval;
+ ret = smsc75xx_write_reg(dev, MII_DATA, val);
+ check_warn_goto_done(ret, "Error writing MII_DATA");
+
+ /* set the address, index & direction (write to PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
+ | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
+ | MII_ACCESS_WRITE;
+ ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
+ check_warn_goto_done(ret, "Error writing MII_ACCESS");
+
+ ret = smsc75xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+}
+
+static int smsc75xx_wait_eeprom(struct usbnet *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
+ check_warn_return(ret, "Error reading E2P_CMD");
+
+ if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
+ break;
+ udelay(40);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
+ netdev_warn(dev->net, "EEPROM read operation timeout");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
+ check_warn_return(ret, "Error reading E2P_CMD");
+
+ if (!(val & E2P_CMD_BUSY))
+ return 0;
+
+ udelay(40);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netdev_warn(dev->net, "EEPROM is busy");
+ return -EIO;
+}
+
+static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
+ u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ BUG_ON(!dev);
+ BUG_ON(!data);
+
+ ret = smsc75xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
+ ret = smsc75xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_CMD");
+
+ ret = smsc75xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
+ check_warn_return(ret, "Error reading E2P_DATA");
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
+ u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ BUG_ON(!dev);
+ BUG_ON(!data);
+
+ ret = smsc75xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_BUSY | E2P_CMD_EWEN;
+ ret = smsc75xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_CMD");
+
+ ret = smsc75xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+
+ /* Fill data register */
+ val = data[i];
+ ret = smsc75xx_write_reg(dev, E2P_DATA, val);
+ check_warn_return(ret, "Error writing E2P_DATA");
+
+ /* Send "write" command */
+ val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
+ ret = smsc75xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_CMD");
+
+ ret = smsc75xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < 100; i++) {
+ u32 dp_sel;
+ ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
+ check_warn_return(ret, "Error reading DP_SEL");
+
+ if (dp_sel & DP_SEL_DPRDY)
+ return 0;
+
+ udelay(40);
+ }
+
+ netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out");
+
+ return -EIO;
+}
+
+static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr,
+ u32 length, u32 *buf)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 dp_sel;
+ int i, ret;
+
+ mutex_lock(&pdata->dataport_mutex);
+
+ ret = smsc75xx_dataport_wait_not_busy(dev);
+ check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry");
+
+ ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
+ check_warn_goto_done(ret, "Error reading DP_SEL");
+
+ dp_sel &= ~DP_SEL_RSEL;
+ dp_sel |= ram_select;
+ ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
+ check_warn_goto_done(ret, "Error writing DP_SEL");
+
+ for (i = 0; i < length; i++) {
+ ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
+ check_warn_goto_done(ret, "Error writing DP_ADDR");
+
+ ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
+ check_warn_goto_done(ret, "Error writing DP_DATA");
+
+ ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
+ check_warn_goto_done(ret, "Error writing DP_CMD");
+
+ ret = smsc75xx_dataport_wait_not_busy(dev);
+ check_warn_goto_done(ret, "smsc75xx_dataport_write timeout");
+ }
+
+done:
+ mutex_unlock(&pdata->dataport_mutex);
+ return ret;
+}
+
+/* returns hash bit number for given MAC address */
+static u32 smsc75xx_hash(char addr[ETH_ALEN])
+{
+ return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
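
As an aside (not in the patch): the 9-bit value returned by smsc75xx_hash() selects one of 512 bits in the vendor hash filter, and smsc75xx_set_multicast() below splits it into a word index and a bit mask. A tiny sketch with a hypothetical hash value of 300:

#include <stdio.h>

int main(void)
{
	unsigned bitnum = 300;			/* hypothetical hash, range 0..511 */
	unsigned word = bitnum / 32;		/* index into the u32 hash table */
	unsigned mask = 1u << (bitnum % 32);	/* bit within that word */

	printf("multicast_hash_table[%u] |= 0x%08x\n", word, mask);
	return 0;
}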
+
+static void smsc75xx_deferred_multicast_write(struct work_struct *param)
+{
+ struct smsc75xx_priv *pdata =
+ container_of(param, struct smsc75xx_priv, set_multicast);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x",
+ pdata->rfe_ctl);
+
+ smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);
+
+ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+	check_warn(ret, "Error writing RFE_CTL");
+}
+
+static void smsc75xx_set_multicast(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ pdata->rfe_ctl &=
+ ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF);
+ pdata->rfe_ctl |= RFE_CTL_AB;
+
+ for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+ pdata->multicast_hash_table[i] = 0;
+
+ if (dev->net->flags & IFF_PROMISC) {
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+ pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
+ } else if (dev->net->flags & IFF_ALLMULTI) {
+ netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
+ pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
+ } else if (!netdev_mc_empty(dev->net)) {
+ struct dev_mc_list *mc_list;
+
+ netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+ pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
+
+ netdev_for_each_mc_addr(mc_list, netdev) {
+ u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
+ pdata->multicast_hash_table[bitnum / 32] |=
+ (1 << (bitnum % 32));
+ }
+ } else {
+ netif_dbg(dev, drv, dev->net, "receive own packets only");
+ pdata->rfe_ctl |= RFE_CTL_DPF;
+ }
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_multicast);
+}
+
+static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
+ u16 lcladv, u16 rmtadv)
+{
+ u32 flow = 0, fct_flow = 0;
+ int ret;
+
+ if (duplex == DUPLEX_FULL) {
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_TX) {
+ flow = (FLOW_TX_FCEN | 0xFFFF);
+ /* set fct_flow thresholds to 20% and 80% */
+ fct_flow = (8 << 8) | 32;
+ }
+
+ if (cap & FLOW_CTRL_RX)
+ flow |= FLOW_RX_FCEN;
+
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ } else {
+ netif_dbg(dev, link, dev->net, "half duplex");
+ }
+
+ ret = smsc75xx_write_reg(dev, FLOW, flow);
+ check_warn_return(ret, "Error writing FLOW");
+
+ ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
+ check_warn_return(ret, "Error writing FCT_FLOW");
+
+ return 0;
+}
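
A quick sanity check (not part of the patch) of the "20% and 80%" comment above, assuming -- this is not verified here -- that the FCT_FLOW threshold fields count 512-byte units of the 20 KB receive FIFO; treat it purely as arithmetic on the 8 and 32 from `(8 << 8) | 32`.

#include <stdio.h>

int main(void)
{
	const unsigned rx_fifo = 20 * 1024;	/* MAX_RX_FIFO_SIZE above */
	/* assumption: the two FCT_FLOW fields count 512-byte chunks */
	unsigned lo = 8 * 512;			/* from the 8 in (8 << 8) */
	unsigned hi = 32 * 512;			/* from the 32 in ... | 32 */

	printf("low  threshold %5u bytes = %u%% of RX FIFO\n",
	       lo, lo * 100 / rx_fifo);
	printf("high threshold %5u bytes = %u%% of RX FIFO\n",
	       hi, hi * 100 / rx_fifo);
	return 0;
}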
+
+static int smsc75xx_link_reset(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ struct ethtool_cmd ecmd;
+ u16 lcladv, rmtadv;
+ int ret;
+
+ /* clear interrupt status */
+ ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
+ check_warn_return(ret, "Error reading PHY_INT_SRC");
+
+ ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
+ check_warn_return(ret, "Error writing INT_STS");
+
+ mii_check_media(mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+ rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+
+ netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x"
+ " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv);
+
+ return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+}
+
+static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
+{
+ u32 intdata;
+
+ if (urb->actual_length != 4) {
+ netdev_warn(dev->net,
+ "unexpected urb length %d", urb->actual_length);
+ return;
+ }
+
+ memcpy(&intdata, urb->transfer_buffer, 4);
+ le32_to_cpus(&intdata);
+
+ netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata);
+
+ if (intdata & INT_ENP_PHY_INT)
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ else
+ netdev_warn(dev->net,
+ "unexpected interrupt, intdata=0x%08X", intdata);
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int smsc75xx_set_rx_csum_offload(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ if (pdata->use_rx_csum)
+ pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
+ else
+ pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ check_warn_return(ret, "Error writing RFE_CTL");
+
+ return 0;
+}
+
+static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ ee->magic = LAN75XX_EEPROM_MAGIC;
+
+ return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ if (ee->magic != LAN75XX_EEPROM_MAGIC) {
+ netdev_warn(dev->net,
+ "EEPROM: magic value mismatch: 0x%x", ee->magic);
+ return -EINVAL;
+ }
+
+ return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+ return pdata->use_rx_csum;
+}
+
+static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+ pdata->use_rx_csum = !!val;
+
+ return smsc75xx_set_rx_csum_offload(dev);
+}
+
+static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data)
+ netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ else
+ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+static const struct ethtool_ops smsc75xx_ethtool_ops = {
+ .get_link = usbnet_get_link,
+ .nway_reset = usbnet_nway_reset,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
+ .get_eeprom = smsc75xx_ethtool_get_eeprom,
+ .set_eeprom = smsc75xx_ethtool_set_eeprom,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .get_rx_csum = smsc75xx_ethtool_get_rx_csum,
+ .set_rx_csum = smsc75xx_ethtool_set_rx_csum,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = smsc75xx_ethtool_set_tso,
+};
+
+static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static void smsc75xx_init_mac_address(struct usbnet *dev)
+{
+ /* try reading mac address from EEPROM */
+ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+ dev->net->dev_addr) == 0) {
+ if (is_valid_ether_addr(dev->net->dev_addr)) {
+ /* eeprom values are valid so use them */
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from EEPROM");
+ return;
+ }
+ }
+
+ /* no eeprom, or eeprom values are invalid. generate random MAC */
+ random_ether_addr(dev->net->dev_addr);
+ netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr");
+}
+
+static int smsc75xx_set_mac_address(struct usbnet *dev)
+{
+ u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 |
+ dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24;
+ u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;
+
+ int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
+ check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret);
+
+ ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
+ check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret);
+
+ addr_hi |= ADDR_FILTX_FB_VALID;
+ ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
+ check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret);
+
+ ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
+ check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret);
+
+ return 0;
+}
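
Illustrative only (not part of the patch): how a hypothetical MAC of 00:11:22:33:44:55 packs into the two little-endian address registers written by smsc75xx_set_mac_address() above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t addr_lo = mac[0] | mac[1] << 8 | mac[2] << 16 |
			   (uint32_t)mac[3] << 24;
	uint32_t addr_hi = mac[4] | mac[5] << 8;

	printf("RX_ADDRL = 0x%08x, RX_ADDRH = 0x%08x\n", addr_lo, addr_hi);
	return 0;
}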
+
+static int smsc75xx_phy_initialize(struct usbnet *dev)
+{
+ int bmcr, timeout = 0;
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = smsc75xx_mdio_read;
+ dev->mii.mdio_write = smsc75xx_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
+
+ /* reset phy and wait for reset to complete */
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+
+ do {
+ msleep(10);
+ bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
+ check_warn_return(bmcr, "Error reading MII_BMCR");
+ timeout++;
+	} while ((bmcr & BMCR_RESET) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout on PHY Reset");
+ return -EIO;
+ }
+
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+	/* read to clear the PHY interrupt status */
+	bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+	check_warn_return(bmcr, "Error reading PHY_INT_SRC");
+
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
+ PHY_INT_MASK_DEFAULT);
+ mii_nway_restart(&dev->mii);
+
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+ return 0;
+}
+
+static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
+{
+ int ret = 0;
+ u32 buf;
+ bool rxenabled;
+
+ ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+ check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+
+ rxenabled = ((buf & MAC_RX_RXEN) != 0);
+
+ if (rxenabled) {
+ buf &= ~MAC_RX_RXEN;
+ ret = smsc75xx_write_reg(dev, MAC_RX, buf);
+ check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ }
+
+ /* add 4 to size for FCS */
+ buf &= ~MAC_RX_MAX_SIZE;
+ buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);
+
+ ret = smsc75xx_write_reg(dev, MAC_RX, buf);
+ check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+
+ if (rxenabled) {
+ buf |= MAC_RX_RXEN;
+ ret = smsc75xx_write_reg(dev, MAC_RX, buf);
+ check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ }
+
+ return 0;
+}
+
+static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
+ check_warn_return(ret, "Failed to set mac rx frame length");
+
+ return usbnet_change_mtu(netdev, new_mtu);
+}
+
+static int smsc75xx_reset(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 buf;
+ int ret = 0, timeout;
+
+ netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
+
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+
+ buf |= HW_CFG_LRST;
+
+ ret = smsc75xx_write_reg(dev, HW_CFG, buf);
+ check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+
+ timeout = 0;
+ do {
+ msleep(10);
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ timeout++;
+ } while ((buf & HW_CFG_LRST) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout on completion of Lite Reset");
+ return -EIO;
+ }
+
+ netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY");
+
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+
+ buf |= PMT_CTL_PHY_RST;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+ check_warn_return(ret, "Failed to write PMT_CTL: %d", ret);
+
+ timeout = 0;
+ do {
+ msleep(10);
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+ timeout++;
+ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout waiting for PHY Reset");
+ return -EIO;
+ }
+
+ netif_dbg(dev, ifup, dev->net, "PHY reset complete");
+
+ smsc75xx_init_mac_address(dev);
+
+ ret = smsc75xx_set_mac_address(dev);
+ check_warn_return(ret, "Failed to set mac address");
+
+ netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr);
+
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf);
+
+ buf |= HW_CFG_BIR;
+
+ ret = smsc75xx_write_reg(dev, HW_CFG, buf);
+ check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after "
+ "writing HW_CFG_BIR: 0x%08x", buf);
+
+ if (!turbo_mode) {
+ buf = 0;
+ dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE;
+ } else if (dev->udev->speed == USB_SPEED_HIGH) {
+ buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE;
+ } else {
+ buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
+ }
+
+ netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld",
+ (ulong)dev->rx_urb_size);
+
+ ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
+ check_warn_return(ret, "Failed to write BURST_CAP: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
+ check_warn_return(ret, "Failed to read BURST_CAP: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BURST_CAP after writing: 0x%08x", buf);
+
+ ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+ check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
+ check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BULK_IN_DLY after writing: 0x%08x", buf);
+
+ if (turbo_mode) {
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+
+ buf |= (HW_CFG_MEF | HW_CFG_BCE);
+
+ ret = smsc75xx_write_reg(dev, HW_CFG, buf);
+ check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+ }
+
+ /* set FIFO sizes */
+ buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+ ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+ check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf);
+
+ buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+ ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+ check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf);
+
+ ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
+ check_warn_return(ret, "Failed to write INT_STS: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, ID_REV, &buf);
+ check_warn_return(ret, "Failed to read ID_REV: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
+
+ /* Configure GPIO pins as LED outputs */
+ ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
+ check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
+
+ buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
+ buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
+
+ ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
+ check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
+
+ ret = smsc75xx_write_reg(dev, FLOW, 0);
+ check_warn_return(ret, "Failed to write FLOW: %d", ret);
+
+ ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
+ check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret);
+
+ /* Don't need rfe_ctl_lock during initialisation */
+ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+ check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+
+ pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;
+
+ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ check_warn_return(ret, "Failed to write RFE_CTL: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+ check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
+
+ /* Enable or disable checksum offload engines */
+ ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE);
+ ret = smsc75xx_set_rx_csum_offload(dev);
+ check_warn_return(ret, "Failed to set rx csum offload: %d", ret);
+
+ smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE);
+
+ smsc75xx_set_multicast(dev->net);
+
+ ret = smsc75xx_phy_initialize(dev);
+ check_warn_return(ret, "Failed to initialize PHY: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
+ check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret);
+
+ /* enable PHY interrupts */
+ buf |= INT_ENP_PHY_INT;
+
+ ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
+ check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
+
+ ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
+ check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
+
+ buf |= MAC_TX_TXEN;
+
+ ret = smsc75xx_write_reg(dev, MAC_TX, buf);
+ check_warn_return(ret, "Failed to write MAC_TX: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf);
+
+ ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
+ check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret);
+
+ buf |= FCT_TX_CTL_EN;
+
+ ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
+ check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+
+ ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
+ check_warn_return(ret, "Failed to set max rx frame length");
+
+ ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+ check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+
+ buf |= MAC_RX_RXEN;
+
+ ret = smsc75xx_write_reg(dev, MAC_RX, buf);
+ check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf);
+
+ ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
+ check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret);
+
+ buf |= FCT_RX_CTL_EN;
+
+ ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
+ check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret);
+
+ netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf);
+
+ netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0");
+ return 0;
+}
+
+static const struct net_device_ops smsc75xx_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = smsc75xx_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = smsc75xx_ioctl,
+ .ndo_set_multicast_list = smsc75xx_set_multicast,
+};
+
+static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct smsc75xx_priv *pdata = NULL;
+ int ret;
+
+ printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
+
+ ret = usbnet_get_endpoints(dev, intf);
+ check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret);
+
+ dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
+ GFP_KERNEL);
+
+ pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ if (!pdata) {
+ netdev_warn(dev->net, "Unable to allocate smsc75xx_priv");
+ return -ENOMEM;
+ }
+
+ pdata->dev = dev;
+
+ spin_lock_init(&pdata->rfe_ctl_lock);
+ mutex_init(&pdata->dataport_mutex);
+
+ INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
+
+ pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
+
+ /* We have to advertise SG otherwise TSO cannot be enabled */
+ dev->net->features |= NETIF_F_SG;
+
+ /* Init all registers */
+ ret = smsc75xx_reset(dev);
+ check_warn_return(ret, "smsc75xx_reset error: %d", ret);
+
+ dev->net->netdev_ops = &smsc75xx_netdev_ops;
+ dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
+ dev->net->flags |= IFF_MULTICAST;
+ dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
+ return 0;
+}
+
+static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ if (pdata) {
+ netif_dbg(dev, ifdown, dev->net, "free pdata");
+ kfree(pdata);
+ pdata = NULL;
+ dev->data[0] = 0;
+ }
+}
+
+static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
+ u32 rx_cmd_b)
+{
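+ /* rx_cmd_b carries a 16-bit checksum in its upper half; it is only
+  * used when the LCSM bit in rx_cmd_a is clear, otherwise the stack
+  * is told that no checksum was supplied.
+  */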
+ if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+}
+
+static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+ while (skb->len > 0) {
+ u32 rx_cmd_a, rx_cmd_b, align_count, size;
+ struct sk_buff *ax_skb;
+ unsigned char *packet;
+
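+ /* Each frame in the URB is prefixed by two little-endian command
+  * words (rx_cmd_a, rx_cmd_b) followed by NET_IP_ALIGN padding bytes.
+  */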
+ memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+ le32_to_cpus(&rx_cmd_a);
+ skb_pull(skb, 4);
+
+ memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+ le32_to_cpus(&rx_cmd_b);
+ skb_pull(skb, 4 + NET_IP_ALIGN);
+
+ packet = skb->data;
+
+ /* get the packet length */
+ size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN;
+ align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
+
+ if (rx_cmd_a & RX_CMD_A_FCS)
+ dev->net->stats.rx_crc_errors++;
+ else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
+ dev->net->stats.rx_frame_errors++;
+ } else {
+ /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+ if (unlikely(size > (ETH_FRAME_LEN + 12))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x", rx_cmd_a);
+ return 0;
+ }
+
+ /* last frame in this batch */
+ if (skb->len == size) {
+ if (pdata->use_rx_csum)
+ smsc75xx_rx_csum_offload(skb, rx_cmd_a,
+ rx_cmd_b);
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_trim(skb, skb->len - 4); /* remove fcs */
+ skb->truesize = size + sizeof(struct sk_buff);
+
+ return 1;
+ }
+
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!ax_skb)) {
+ netdev_warn(dev->net, "Error allocating skb");
+ return 0;
+ }
+
+ ax_skb->len = size;
+ ax_skb->data = packet;
+ skb_set_tail_pointer(ax_skb, size);
+
+ if (pdata->use_rx_csum)
+ smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a,
+ rx_cmd_b);
+ else
+ ax_skb->ip_summed = CHECKSUM_NONE;
+
+ skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
+ ax_skb->truesize = size + sizeof(struct sk_buff);
+
+ usbnet_skb_return(dev, ax_skb);
+ }
+
+ skb_pull(skb, size);
+
+ /* padding bytes before the next frame starts */
+ if (skb->len)
+ skb_pull(skb, align_count);
+ }
+
+ if (unlikely(skb->len < 0)) {
+ netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
+ struct sk_buff *skb, gfp_t flags)
+{
+ u32 tx_cmd_a, tx_cmd_b;
+
+ skb_linearize(skb);
+
+ if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
+ struct sk_buff *skb2 =
+ skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE;
+
+ if (skb_is_gso(skb)) {
+ u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN);
+ tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS;
+
+ tx_cmd_a |= TX_CMD_A_LSO;
+ } else {
+ tx_cmd_b = 0;
+ }
+
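+ /* Prepend the command words in reverse order so that tx_cmd_a ends
+  * up first in the buffer, immediately followed by tx_cmd_b.
+  */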
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_b);
+ memcpy(skb->data, &tx_cmd_b, 4);
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_a);
+ memcpy(skb->data, &tx_cmd_a, 4);
+
+ return skb;
+}
+
+static const struct driver_info smsc75xx_info = {
+ .description = "smsc75xx USB 2.0 Gigabit Ethernet",
+ .bind = smsc75xx_bind,
+ .unbind = smsc75xx_unbind,
+ .link_reset = smsc75xx_link_reset,
+ .reset = smsc75xx_reset,
+ .rx_fixup = smsc75xx_rx_fixup,
+ .tx_fixup = smsc75xx_tx_fixup,
+ .status = smsc75xx_status,
+ .flags = FLAG_ETHER | FLAG_SEND_ZLP,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ /* SMSC7500 USB Gigabit Ethernet Device */
+ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500),
+ .driver_info = (unsigned long) &smsc75xx_info,
+ },
+ {
+ /* SMSC7500 USB Gigabit Ethernet Device */
+ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505),
+ .driver_info = (unsigned long) &smsc75xx_info,
+ },
+ { }, /* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver smsc75xx_driver = {
+ .name = SMSC_CHIPNAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+};
+
+static int __init smsc75xx_init(void)
+{
+ return usb_register(&smsc75xx_driver);
+}
+module_init(smsc75xx_init);
+
+static void __exit smsc75xx_exit(void)
+{
+ usb_deregister(&smsc75xx_driver);
+}
+module_exit(smsc75xx_exit);
+
+MODULE_AUTHOR("Nancy Lin");
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
new file mode 100644
index 000000000000..16e98c778344
--- /dev/null
+++ b/drivers/net/usb/smsc75xx.h
@@ -0,0 +1,421 @@
+ /***************************************************************************
+ *
+ * Copyright (C) 2007-2010 SMSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *****************************************************************************/
+
+#ifndef _SMSC75XX_H
+#define _SMSC75XX_H
+
+/* Tx command words */
+#define TX_CMD_A_LSO (0x08000000)
+#define TX_CMD_A_IPE (0x04000000)
+#define TX_CMD_A_TPE (0x02000000)
+#define TX_CMD_A_IVTG (0x01000000)
+#define TX_CMD_A_RVTG (0x00800000)
+#define TX_CMD_A_FCS (0x00400000)
+#define TX_CMD_A_LEN (0x000FFFFF)
+
+#define TX_CMD_B_MSS (0x3FFF0000)
+#define TX_CMD_B_MSS_SHIFT (16)
+#define TX_MSS_MIN ((u16)8)
+#define TX_CMD_B_VTAG (0x0000FFFF)
+
+/* Rx command words */
+#define RX_CMD_A_ICE (0x80000000)
+#define RX_CMD_A_TCE (0x40000000)
+#define RX_CMD_A_IPV (0x20000000)
+#define RX_CMD_A_PID (0x18000000)
+#define RX_CMD_A_PID_NIP (0x00000000)
+#define RX_CMD_A_PID_TCP (0x08000000)
+#define RX_CMD_A_PID_UDP (0x10000000)
+#define RX_CMD_A_PID_PP (0x18000000)
+#define RX_CMD_A_PFF (0x04000000)
+#define RX_CMD_A_BAM (0x02000000)
+#define RX_CMD_A_MAM (0x01000000)
+#define RX_CMD_A_FVTG (0x00800000)
+#define RX_CMD_A_RED (0x00400000)
+#define RX_CMD_A_RWT (0x00200000)
+#define RX_CMD_A_RUNT (0x00100000)
+#define RX_CMD_A_LONG (0x00080000)
+#define RX_CMD_A_RXE (0x00040000)
+#define RX_CMD_A_DRB (0x00020000)
+#define RX_CMD_A_FCS (0x00010000)
+#define RX_CMD_A_UAM (0x00008000)
+#define RX_CMD_A_LCSM (0x00004000)
+#define RX_CMD_A_LEN (0x00003FFF)
+
+#define RX_CMD_B_CSUM (0xFFFF0000)
+#define RX_CMD_B_CSUM_SHIFT (16)
+#define RX_CMD_B_VTAG (0x0000FFFF)
+
+/* SCSRs */
+#define ID_REV (0x0000)
+
+#define FPGA_REV (0x0004)
+
+#define BOND_CTL (0x0008)
+
+#define INT_STS (0x000C)
+#define INT_STS_RDFO_INT (0x00400000)
+#define INT_STS_TXE_INT (0x00200000)
+#define INT_STS_MACRTO_INT (0x00100000)
+#define INT_STS_TX_DIS_INT (0x00080000)
+#define INT_STS_RX_DIS_INT (0x00040000)
+#define INT_STS_PHY_INT_ (0x00020000)
+#define INT_STS_MAC_ERR_INT (0x00008000)
+#define INT_STS_TDFU (0x00004000)
+#define INT_STS_TDFO (0x00002000)
+#define INT_STS_GPIOS (0x00000FFF)
+#define INT_STS_CLEAR_ALL (0xFFFFFFFF)
+
+#define HW_CFG (0x0010)
+#define HW_CFG_SMDET_STS (0x00008000)
+#define HW_CFG_SMDET_EN (0x00004000)
+#define HW_CFG_EEM (0x00002000)
+#define HW_CFG_RST_PROTECT (0x00001000)
+#define HW_CFG_PORT_SWAP (0x00000800)
+#define HW_CFG_PHY_BOOST (0x00000600)
+#define HW_CFG_PHY_BOOST_NORMAL (0x00000000)
+#define HW_CFG_PHY_BOOST_4 (0x00002000)
+#define HW_CFG_PHY_BOOST_8 (0x00004000)
+#define HW_CFG_PHY_BOOST_12 (0x00006000)
+#define HW_CFG_LEDB (0x00000100)
+#define HW_CFG_BIR (0x00000080)
+#define HW_CFG_SBP (0x00000040)
+#define HW_CFG_IME (0x00000020)
+#define HW_CFG_MEF (0x00000010)
+#define HW_CFG_ETC (0x00000008)
+#define HW_CFG_BCE (0x00000004)
+#define HW_CFG_LRST (0x00000002)
+#define HW_CFG_SRST (0x00000001)
+
+#define PMT_CTL (0x0014)
+#define PMT_CTL_PHY_PWRUP (0x00000400)
+#define PMT_CTL_RES_CLR_WKP_EN (0x00000100)
+#define PMT_CTL_DEV_RDY (0x00000080)
+#define PMT_CTL_SUS_MODE (0x00000060)
+#define PMT_CTL_SUS_MODE_0 (0x00000000)
+#define PMT_CTL_SUS_MODE_1 (0x00000020)
+#define PMT_CTL_SUS_MODE_2 (0x00000040)
+#define PMT_CTL_SUS_MODE_3 (0x00000060)
+#define PMT_CTL_PHY_RST (0x00000010)
+#define PMT_CTL_WOL_EN (0x00000008)
+#define PMT_CTL_ED_EN (0x00000004)
+#define PMT_CTL_WUPS (0x00000003)
+#define PMT_CTL_WUPS_NO (0x00000000)
+#define PMT_CTL_WUPS_ED (0x00000001)
+#define PMT_CTL_WUPS_WOL (0x00000002)
+#define PMT_CTL_WUPS_MULTI (0x00000003)
+
+#define LED_GPIO_CFG (0x0018)
+#define LED_GPIO_CFG_LED2_FUN_SEL (0x80000000)
+#define LED_GPIO_CFG_LED10_FUN_SEL (0x40000000)
+#define LED_GPIO_CFG_LEDGPIO_EN (0x0000F000)
+#define LED_GPIO_CFG_LEDGPIO_EN_0 (0x00001000)
+#define LED_GPIO_CFG_LEDGPIO_EN_1 (0x00002000)
+#define LED_GPIO_CFG_LEDGPIO_EN_2 (0x00004000)
+#define LED_GPIO_CFG_LEDGPIO_EN_3 (0x00008000)
+#define LED_GPIO_CFG_GPBUF (0x00000F00)
+#define LED_GPIO_CFG_GPBUF_0 (0x00000100)
+#define LED_GPIO_CFG_GPBUF_1 (0x00000200)
+#define LED_GPIO_CFG_GPBUF_2 (0x00000400)
+#define LED_GPIO_CFG_GPBUF_3 (0x00000800)
+#define LED_GPIO_CFG_GPDIR (0x000000F0)
+#define LED_GPIO_CFG_GPDIR_0 (0x00000010)
+#define LED_GPIO_CFG_GPDIR_1 (0x00000020)
+#define LED_GPIO_CFG_GPDIR_2 (0x00000040)
+#define LED_GPIO_CFG_GPDIR_3 (0x00000080)
+#define LED_GPIO_CFG_GPDATA (0x0000000F)
+#define LED_GPIO_CFG_GPDATA_0 (0x00000001)
+#define LED_GPIO_CFG_GPDATA_1 (0x00000002)
+#define LED_GPIO_CFG_GPDATA_2 (0x00000004)
+#define LED_GPIO_CFG_GPDATA_3 (0x00000008)
+
+#define GPIO_CFG (0x001C)
+#define GPIO_CFG_SHIFT (24)
+#define GPIO_CFG_GPEN (0xFF000000)
+#define GPIO_CFG_GPBUF (0x00FF0000)
+#define GPIO_CFG_GPDIR (0x0000FF00)
+#define GPIO_CFG_GPDATA (0x000000FF)
+
+#define GPIO_WAKE (0x0020)
+#define GPIO_WAKE_PHY_LINKUP_EN (0x80000000)
+#define GPIO_WAKE_POL (0x0FFF0000)
+#define GPIO_WAKE_POL_SHIFT (16)
+#define GPIO_WAKE_WK (0x00000FFF)
+
+#define DP_SEL (0x0024)
+#define DP_SEL_DPRDY (0x80000000)
+#define DP_SEL_RSEL (0x0000000F)
+#define DP_SEL_URX (0x00000000)
+#define DP_SEL_VHF (0x00000001)
+#define DP_SEL_VHF_HASH_LEN (16)
+#define DP_SEL_VHF_VLAN_LEN (128)
+#define DP_SEL_LSO_HEAD (0x00000002)
+#define DP_SEL_FCT_RX (0x00000003)
+#define DP_SEL_FCT_TX (0x00000004)
+#define DP_SEL_DESCRIPTOR (0x00000005)
+#define DP_SEL_WOL (0x00000006)
+
+#define DP_CMD (0x0028)
+#define DP_CMD_WRITE (0x01)
+#define DP_CMD_READ (0x00)
+
+#define DP_ADDR (0x002C)
+
+#define DP_DATA (0x0030)
+
+#define BURST_CAP (0x0034)
+#define BURST_CAP_MASK (0x0000000F)
+
+#define INT_EP_CTL (0x0038)
+#define INT_EP_CTL_INTEP_ON (0x80000000)
+#define INT_EP_CTL_RDFO_EN (0x00400000)
+#define INT_EP_CTL_TXE_EN (0x00200000)
+#define INT_EP_CTL_MACROTO_EN (0x00100000)
+#define INT_EP_CTL_TX_DIS_EN (0x00080000)
+#define INT_EP_CTL_RX_DIS_EN (0x00040000)
+#define INT_EP_CTL_PHY_EN_ (0x00020000)
+#define INT_EP_CTL_MAC_ERR_EN (0x00008000)
+#define INT_EP_CTL_TDFU_EN (0x00004000)
+#define INT_EP_CTL_TDFO_EN (0x00002000)
+#define INT_EP_CTL_RX_FIFO_EN (0x00001000)
+#define INT_EP_CTL_GPIOX_EN (0x00000FFF)
+
+#define BULK_IN_DLY (0x003C)
+#define BULK_IN_DLY_MASK (0xFFFF)
+
+#define E2P_CMD (0x0040)
+#define E2P_CMD_BUSY (0x80000000)
+#define E2P_CMD_MASK (0x70000000)
+#define E2P_CMD_READ (0x00000000)
+#define E2P_CMD_EWDS (0x10000000)
+#define E2P_CMD_EWEN (0x20000000)
+#define E2P_CMD_WRITE (0x30000000)
+#define E2P_CMD_WRAL (0x40000000)
+#define E2P_CMD_ERASE (0x50000000)
+#define E2P_CMD_ERAL (0x60000000)
+#define E2P_CMD_RELOAD (0x70000000)
+#define E2P_CMD_TIMEOUT (0x00000400)
+#define E2P_CMD_LOADED (0x00000200)
+#define E2P_CMD_ADDR (0x000001FF)
+
+#define MAX_EEPROM_SIZE (512)
+
+#define E2P_DATA (0x0044)
+#define E2P_DATA_MASK_ (0x000000FF)
+
+#define RFE_CTL (0x0060)
+#define RFE_CTL_TCPUDP_CKM (0x00001000)
+#define RFE_CTL_IP_CKM (0x00000800)
+#define RFE_CTL_AB (0x00000400)
+#define RFE_CTL_AM (0x00000200)
+#define RFE_CTL_AU (0x00000100)
+#define RFE_CTL_VS (0x00000080)
+#define RFE_CTL_UF (0x00000040)
+#define RFE_CTL_VF (0x00000020)
+#define RFE_CTL_SPF (0x00000010)
+#define RFE_CTL_MHF (0x00000008)
+#define RFE_CTL_DHF (0x00000004)
+#define RFE_CTL_DPF (0x00000002)
+#define RFE_CTL_RST_RF (0x00000001)
+
+#define VLAN_TYPE (0x0064)
+#define VLAN_TYPE_MASK (0x0000FFFF)
+
+#define FCT_RX_CTL (0x0090)
+#define FCT_RX_CTL_EN (0x80000000)
+#define FCT_RX_CTL_RST (0x40000000)
+#define FCT_RX_CTL_SBF (0x02000000)
+#define FCT_RX_CTL_OVERFLOW (0x01000000)
+#define FCT_RX_CTL_FRM_DROP (0x00800000)
+#define FCT_RX_CTL_RX_NOT_EMPTY (0x00400000)
+#define FCT_RX_CTL_RX_EMPTY (0x00200000)
+#define FCT_RX_CTL_RX_DISABLED (0x00100000)
+#define FCT_RX_CTL_RXUSED (0x0000FFFF)
+
+#define FCT_TX_CTL (0x0094)
+#define FCT_TX_CTL_EN (0x80000000)
+#define FCT_TX_CTL_RST (0x40000000)
+#define FCT_TX_CTL_TX_NOT_EMPTY (0x00400000)
+#define FCT_TX_CTL_TX_EMPTY (0x00200000)
+#define FCT_TX_CTL_TX_DISABLED (0x00100000)
+#define FCT_TX_CTL_TXUSED (0x0000FFFF)
+
+#define FCT_RX_FIFO_END (0x0098)
+#define FCT_RX_FIFO_END_MASK (0x0000007F)
+
+#define FCT_TX_FIFO_END (0x009C)
+#define FCT_TX_FIFO_END_MASK (0x0000003F)
+
+#define FCT_FLOW (0x00A0)
+#define FCT_FLOW_THRESHOLD_OFF (0x00007F00)
+#define FCT_FLOW_THRESHOLD_OFF_SHIFT (8)
+#define FCT_FLOW_THRESHOLD_ON (0x0000007F)
+
+/* MAC CSRs */
+#define MAC_CR (0x100)
+#define MAC_CR_ADP (0x00002000)
+#define MAC_CR_ADD (0x00001000)
+#define MAC_CR_ASD (0x00000800)
+#define MAC_CR_INT_LOOP (0x00000400)
+#define MAC_CR_BOLMT (0x000000C0)
+#define MAC_CR_FDPX (0x00000008)
+#define MAC_CR_CFG (0x00000006)
+#define MAC_CR_CFG_10 (0x00000000)
+#define MAC_CR_CFG_100 (0x00000002)
+#define MAC_CR_CFG_1000 (0x00000004)
+#define MAC_CR_RST (0x00000001)
+
+#define MAC_RX (0x104)
+#define MAC_RX_MAX_SIZE (0x3FFF0000)
+#define MAC_RX_MAX_SIZE_SHIFT (16)
+#define MAC_RX_FCS_STRIP (0x00000010)
+#define MAC_RX_FSE (0x00000004)
+#define MAC_RX_RXD (0x00000002)
+#define MAC_RX_RXEN (0x00000001)
+
+#define MAC_TX (0x108)
+#define MAC_TX_BFCS (0x00000004)
+#define MAC_TX_TXD (0x00000002)
+#define MAC_TX_TXEN (0x00000001)
+
+#define FLOW (0x10C)
+#define FLOW_FORCE_FC (0x80000000)
+#define FLOW_TX_FCEN (0x40000000)
+#define FLOW_RX_FCEN (0x20000000)
+#define FLOW_FPF (0x10000000)
+#define FLOW_PAUSE_TIME (0x0000FFFF)
+
+#define RAND_SEED (0x110)
+#define RAND_SEED_MASK (0x0000FFFF)
+
+#define ERR_STS (0x114)
+#define ERR_STS_FCS_ERR (0x00000100)
+#define ERR_STS_LFRM_ERR (0x00000080)
+#define ERR_STS_RUNT_ERR (0x00000040)
+#define ERR_STS_COLLISION_ERR (0x00000010)
+#define ERR_STS_ALIGN_ERR (0x00000008)
+#define ERR_STS_URUN_ERR (0x00000004)
+
+#define RX_ADDRH (0x118)
+#define RX_ADDRH_MASK (0x0000FFFF)
+
+#define RX_ADDRL (0x11C)
+
+#define MII_ACCESS (0x120)
+#define MII_ACCESS_PHY_ADDR (0x0000F800)
+#define MII_ACCESS_PHY_ADDR_SHIFT (11)
+#define MII_ACCESS_REG_ADDR (0x000007C0)
+#define MII_ACCESS_REG_ADDR_SHIFT (6)
+#define MII_ACCESS_READ (0x00000000)
+#define MII_ACCESS_WRITE (0x00000002)
+#define MII_ACCESS_BUSY (0x00000001)
+
+#define MII_DATA (0x124)
+#define MII_DATA_MASK (0x0000FFFF)
+
+#define WUCSR (0x140)
+#define WUCSR_PFDA_FR (0x00000080)
+#define WUCSR_WUFR (0x00000040)
+#define WUCSR_MPR (0x00000020)
+#define WUCSR_BCAST_FR (0x00000010)
+#define WUCSR_PFDA_EN (0x00000008)
+#define WUCSR_WUEN (0x00000004)
+#define WUCSR_MPEN (0x00000002)
+#define WUCSR_BCST_EN (0x00000001)
+
+#define WUF_CFGX (0x144)
+#define WUF_CFGX_EN (0x80000000)
+#define WUF_CFGX_ATYPE (0x03000000)
+#define WUF_CFGX_ATYPE_UNICAST (0x00000000)
+#define WUF_CFGX_ATYPE_MULTICAST (0x02000000)
+#define WUF_CFGX_ATYPE_ALL (0x03000000)
+#define WUF_CFGX_PATTERN_OFFSET (0x007F0000)
+#define WUF_CFGX_PATTERN_OFFSET_SHIFT (16)
+#define WUF_CFGX_CRC16 (0x0000FFFF)
+#define WUF_NUM (8)
+
+#define WUF_MASKX (0x170)
+#define WUF_MASKX_AVALID (0x80000000)
+#define WUF_MASKX_ATYPE (0x40000000)
+
+#define ADDR_FILTX (0x300)
+#define ADDR_FILTX_FB_VALID (0x80000000)
+#define ADDR_FILTX_FB_TYPE (0x40000000)
+#define ADDR_FILTX_FB_ADDRHI (0x0000FFFF)
+#define ADDR_FILTX_SB_ADDRLO (0xFFFFFFFF)
+
+#define WUCSR2 (0x500)
+#define WUCSR2_NS_RCD (0x00000040)
+#define WUCSR2_ARP_RCD (0x00000020)
+#define WUCSR2_TCPSYN_RCD (0x00000010)
+#define WUCSR2_NS_OFFLOAD (0x00000004)
+#define WUCSR2_ARP_OFFLOAD (0x00000002)
+#define WUCSR2_TCPSYN_OFFLOAD (0x00000001)
+
+#define WOL_FIFO_STS (0x504)
+
+#define IPV6_ADDRX (0x510)
+
+#define IPV4_ADDRX (0x590)
+
+
+/* Vendor-specific PHY Definitions */
+
+/* Mode Control/Status Register */
+#define PHY_MODE_CTRL_STS (17)
+#define MODE_CTRL_STS_EDPWRDOWN ((u16)0x2000)
+#define MODE_CTRL_STS_ENERGYON ((u16)0x0002)
+
+#define PHY_INT_SRC (29)
+#define PHY_INT_SRC_ENERGY_ON ((u16)0x0080)
+#define PHY_INT_SRC_ANEG_COMP ((u16)0x0040)
+#define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020)
+#define PHY_INT_SRC_LINK_DOWN ((u16)0x0010)
+
+#define PHY_INT_MASK (30)
+#define PHY_INT_MASK_ENERGY_ON ((u16)0x0080)
+#define PHY_INT_MASK_ANEG_COMP ((u16)0x0040)
+#define PHY_INT_MASK_REMOTE_FAULT ((u16)0x0020)
+#define PHY_INT_MASK_LINK_DOWN ((u16)0x0010)
+#define PHY_INT_MASK_DEFAULT (PHY_INT_MASK_ANEG_COMP | \
+ PHY_INT_MASK_LINK_DOWN)
+
+#define PHY_SPECIAL (31)
+#define PHY_SPECIAL_SPD ((u16)0x001C)
+#define PHY_SPECIAL_SPD_10HALF ((u16)0x0004)
+#define PHY_SPECIAL_SPD_10FULL ((u16)0x0014)
+#define PHY_SPECIAL_SPD_100HALF ((u16)0x0008)
+#define PHY_SPECIAL_SPD_100FULL ((u16)0x0018)
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1
+#define USB_VENDOR_REQUEST_GET_STATS 0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_RDFO_INT ((u32)BIT(22))
+#define INT_ENP_TXE_INT ((u32)BIT(21))
+#define INT_ENP_TX_DIS_INT ((u32)BIT(19))
+#define INT_ENP_RX_DIS_INT ((u32)BIT(18))
+#define INT_ENP_PHY_INT ((u32)BIT(17))
+#define INT_ENP_MAC_ERR_INT ((u32)BIT(15))
+#define INT_ENP_RX_FIFO_DATA_INT ((u32)BIT(12))
+
+#endif /* _SMSC75XX_H */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 0c3c738d7419..3135af63d378 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -28,6 +28,7 @@
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -78,7 +79,7 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
if (unlikely(ret < 0))
- devwarn(dev, "Failed to read register index 0x%08x", index);
+ netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
le32_to_cpus(buf);
*data = *buf;
@@ -106,7 +107,7 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
if (unlikely(ret < 0))
- devwarn(dev, "Failed to write register index 0x%08x", index);
+ netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
kfree(buf);
@@ -138,7 +139,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
/* confirm MII not busy */
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "MII is busy in smsc95xx_mdio_read");
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
mutex_unlock(&dev->phy_mutex);
return -EIO;
}
@@ -150,7 +151,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
smsc95xx_write_reg(dev, MII_ADDR, addr);
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "Timed out reading MII reg %02X", idx);
+ netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
mutex_unlock(&dev->phy_mutex);
return -EIO;
}
@@ -172,7 +173,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
/* confirm MII not busy */
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "MII is busy in smsc95xx_mdio_write");
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
mutex_unlock(&dev->phy_mutex);
return;
}
@@ -187,7 +188,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
smsc95xx_write_reg(dev, MII_ADDR, addr);
if (smsc95xx_phy_wait_not_busy(dev))
- devwarn(dev, "Timed out writing MII reg %02X", idx);
+ netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
mutex_unlock(&dev->phy_mutex);
}
@@ -205,7 +206,7 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
} while (!time_after(jiffies, start_time + HZ));
if (val & (E2P_CMD_TIMEOUT_ | E2P_CMD_BUSY_)) {
- devwarn(dev, "EEPROM read operation timeout");
+ netdev_warn(dev->net, "EEPROM read operation timeout\n");
return -EIO;
}
@@ -226,7 +227,7 @@ static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
udelay(40);
} while (!time_after(jiffies, start_time + HZ));
- devwarn(dev, "EEPROM is busy");
+ netdev_warn(dev->net, "EEPROM is busy\n");
return -EIO;
}
@@ -308,7 +309,7 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
int status = urb->status;
if (status < 0)
- devwarn(dev, "async callback failed with %d", status);
+ netdev_warn(dev->net, "async callback failed with %d\n", status);
kfree(usb_context);
usb_free_urb(urb);
@@ -323,13 +324,13 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- devwarn(dev, "Error allocating URB");
+ netdev_warn(dev->net, "Error allocating URB\n");
return -ENOMEM;
}
usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
if (usb_context == NULL) {
- devwarn(dev, "Error allocating control msg");
+ netdev_warn(dev->net, "Error allocating control msg\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -348,7 +349,8 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- devwarn(dev, "Error submitting control msg, sts=%d", status);
+ netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
+ status);
kfree(usb_context);
usb_free_urb(urb);
}
@@ -375,46 +377,32 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (dev->net->flags & IFF_PROMISC) {
- if (netif_msg_drv(dev))
- devdbg(dev, "promiscuous mode enabled");
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
pdata->mac_cr |= MAC_CR_PRMS_;
pdata->mac_cr &= ~(MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
} else if (dev->net->flags & IFF_ALLMULTI) {
- if (netif_msg_drv(dev))
- devdbg(dev, "receive all multicast enabled");
+ netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
- } else if (dev->net->mc_count > 0) {
- struct dev_mc_list *mc_list = dev->net->mc_list;
- int count = 0;
+ } else if (!netdev_mc_empty(dev->net)) {
+ struct dev_mc_list *mc_list;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if (mc_list->dmi_addrlen == ETH_ALEN) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
- u32 mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_hi |= mask;
- else
- hash_lo |= mask;
- } else {
- devwarn(dev, "dmi_addrlen != 6");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, netdev) {
+ u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ u32 mask = 0x01 << (bitnum & 0x1F);
+ if (bitnum & 0x20)
+ hash_hi |= mask;
+ else
+ hash_lo |= mask;
}
- if (count != ((u32)dev->net->mc_count))
- devwarn(dev, "mc_count != dev->mc_count");
-
- if (netif_msg_drv(dev))
- devdbg(dev, "HASHH=0x%08X, HASHL=0x%08X", hash_hi,
- hash_lo);
+ netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
+ hash_hi, hash_lo);
} else {
- if (netif_msg_drv(dev))
- devdbg(dev, "receive own packets only");
+ netif_dbg(dev, drv, dev->net, "receive own packets only\n");
pdata->mac_cr &=
~(MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
}
@@ -434,7 +422,7 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0) {
- devwarn(dev, "error reading AFC_CFG");
+ netdev_warn(dev->net, "error reading AFC_CFG\n");
return;
}
@@ -451,13 +439,11 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
else
afc_cfg &= ~0xF;
- if (netif_msg_link(dev))
- devdbg(dev, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
+ cap & FLOW_CTRL_RX ? "enabled" : "disabled",
+ cap & FLOW_CTRL_TX ? "enabled" : "disabled");
} else {
- if (netif_msg_link(dev))
- devdbg(dev, "half duplex");
+ netif_dbg(dev, link, dev->net, "half duplex\n");
flow = 0;
afc_cfg |= 0xF;
}
@@ -485,9 +471,8 @@ static int smsc95xx_link_reset(struct usbnet *dev)
lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- if (netif_msg_link(dev))
- devdbg(dev, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x",
- ecmd.speed, ecmd.duplex, lcladv, rmtadv);
+ netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n",
+ ecmd.speed, ecmd.duplex, lcladv, rmtadv);
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (ecmd.duplex != DUPLEX_FULL) {
@@ -511,20 +496,21 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
u32 intdata;
if (urb->actual_length != 4) {
- devwarn(dev, "unexpected urb length %d", urb->actual_length);
+ netdev_warn(dev->net, "unexpected urb length %d\n",
+ urb->actual_length);
return;
}
memcpy(&intdata, urb->transfer_buffer, 4);
le32_to_cpus(&intdata);
- if (netif_msg_link(dev))
- devdbg(dev, "intdata: 0x%08X", intdata);
+ netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
else
- devwarn(dev, "unexpected interrupt, intdata=0x%08X", intdata);
+ netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
+ intdata);
}
/* Enable or disable Tx & Rx checksum offload engines */
@@ -534,7 +520,7 @@ static int smsc95xx_set_csums(struct usbnet *dev)
u32 read_buf;
int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read COE_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
return ret;
}
@@ -550,12 +536,11 @@ static int smsc95xx_set_csums(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write COE_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
return ret;
}
- if (netif_msg_hw(dev))
- devdbg(dev, "COE_CR = 0x%08x", read_buf);
+ netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
}
@@ -580,8 +565,8 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
if (ee->magic != LAN95XX_EEPROM_MAGIC) {
- devwarn(dev, "EEPROM: magic value mismatch, magic = 0x%x",
- ee->magic);
+ netdev_warn(dev->net, "EEPROM: magic value mismatch, magic = 0x%x\n",
+ ee->magic);
return -EINVAL;
}
@@ -659,16 +644,14 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
dev->net->dev_addr) == 0) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC address read from EEPROM");
+ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n");
return;
}
}
/* no eeprom, or eeprom values are invalid. generate random MAC */
random_ether_addr(dev->net->dev_addr);
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC address set to random_ether_addr");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
}
static int smsc95xx_set_mac_address(struct usbnet *dev)
@@ -680,13 +663,13 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
if (ret < 0) {
- devwarn(dev, "Failed to write ADDRL: %d", ret);
+ netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
return ret;
}
ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
if (ret < 0) {
- devwarn(dev, "Failed to write ADDRH: %d", ret);
+ netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
return ret;
}
@@ -727,6 +710,8 @@ static void smsc95xx_start_rx_path(struct usbnet *dev)
static int smsc95xx_phy_initialize(struct usbnet *dev)
{
+ int bmcr, timeout = 0;
+
/* Initialize MII structure */
dev->mii.dev = dev->net;
dev->mii.mdio_read = smsc95xx_mdio_read;
@@ -735,7 +720,20 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID;
+ /* reset phy and wait for reset to complete */
smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+
+ do {
+ msleep(10);
+ bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
+ timeout++;
+ } while ((bmcr & MII_BMCR) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout on PHY Reset");
+ return -EIO;
+ }
+
smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
@@ -747,8 +745,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
PHY_INT_MASK_DEFAULT_);
mii_nway_restart(&dev->mii);
- if (netif_msg_ifup(dev))
- devdbg(dev, "phy initialised successfully");
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
return 0;
}
@@ -759,14 +756,13 @@ static int smsc95xx_reset(struct usbnet *dev)
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
- if (netif_msg_ifup(dev))
- devdbg(dev, "entering smsc95xx_reset");
+ netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
write_buf = HW_CFG_LRST_;
ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG_LRST_ bit in HW_CFG "
- "register, ret = %d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
+ ret);
return ret;
}
@@ -774,7 +770,7 @@ static int smsc95xx_reset(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
msleep(10);
@@ -782,14 +778,14 @@ static int smsc95xx_reset(struct usbnet *dev)
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
if (timeout >= 100) {
- devwarn(dev, "timeout waiting for completion of Lite Reset");
+ netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
return ret;
}
write_buf = PM_CTL_PHY_RST_;
ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write PM_CTRL: %d", ret);
+ netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
return ret;
}
@@ -797,7 +793,7 @@ static int smsc95xx_reset(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read PM_CTRL: %d", ret);
+ netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
return ret;
}
msleep(10);
@@ -805,7 +801,7 @@ static int smsc95xx_reset(struct usbnet *dev)
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
if (timeout >= 100) {
- devwarn(dev, "timeout waiting for PHY Reset");
+ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
return ret;
}
@@ -815,35 +811,35 @@ static int smsc95xx_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC Address: %pM", dev->net->dev_addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC Address: %pM\n", dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG : 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG : 0x%08x\n", read_buf);
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG_BIR_ bit in HW_CFG "
- "register, ret = %d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG after writing "
- "HW_CFG_BIR_: 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
+ read_buf);
if (!turbo_mode) {
burst_cap = 0;
@@ -856,47 +852,47 @@ static int smsc95xx_reset(struct usbnet *dev)
dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "rx_urb_size=%ld", (ulong)dev->rx_urb_size);
+ netif_dbg(dev, ifup, dev->net,
+ "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
if (ret < 0) {
- devwarn(dev, "Failed to write BURST_CAP: %d", ret);
+ netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
return ret;
}
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read BURST_CAP: %d", ret);
+ netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from BURST_CAP after writing: 0x%08x",
- read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BURST_CAP after writing: 0x%08x\n",
+ read_buf);
read_buf = DEFAULT_BULK_IN_DELAY;
ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
if (ret < 0) {
- devwarn(dev, "ret = %d", ret);
+ netdev_warn(dev->net, "ret = %d\n", ret);
return ret;
}
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read BULK_IN_DLY: %d", ret);
+ netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from BULK_IN_DLY after writing: "
- "0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
+ read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG: 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG: 0x%08x\n", read_buf);
if (turbo_mode)
read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -908,41 +904,41 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG register, ret=%d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG after writing: 0x%08x",
- read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
write_buf = 0xFFFFFFFF;
ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write INT_STS register, ret=%d", ret);
+ netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read ID_REV: %d", ret);
+ netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "ID_REV = 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write LED_GPIO_CFG register, ret=%d",
- ret);
+ netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
+ ret);
return ret;
}
@@ -950,21 +946,21 @@ static int smsc95xx_reset(struct usbnet *dev)
write_buf = 0;
ret = smsc95xx_write_reg(dev, FLOW, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write FLOW: %d", ret);
+ netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
return ret;
}
read_buf = AFC_CFG_DEFAULT;
ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write AFC_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
return ret;
}
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
if (ret < 0) {
- devwarn(dev, "Failed to read MAC_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
return ret;
}
@@ -973,7 +969,7 @@ static int smsc95xx_reset(struct usbnet *dev)
write_buf = (u32)ETH_P_8021Q;
ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write VAN1: %d", ret);
+ netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
return ret;
}
@@ -981,7 +977,7 @@ static int smsc95xx_reset(struct usbnet *dev)
ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
ret = smsc95xx_set_csums(dev);
if (ret < 0) {
- devwarn(dev, "Failed to set csum offload: %d", ret);
+ netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
return ret;
}
@@ -992,7 +988,7 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read INT_EP_CTL: %d", ret);
+ netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
return ret;
}
@@ -1001,15 +997,14 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write INT_EP_CTL: %d", ret);
+ netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
return ret;
}
smsc95xx_start_tx_path(dev);
smsc95xx_start_rx_path(dev);
- if (netif_msg_ifup(dev))
- devdbg(dev, "smsc95xx_reset, return 0");
+ netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
return 0;
}
@@ -1034,7 +1029,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = usbnet_get_endpoints(dev, intf);
if (ret < 0) {
- devwarn(dev, "usbnet_get_endpoints failed: %d", ret);
+ netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
return ret;
}
@@ -1043,7 +1038,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (!pdata) {
- devwarn(dev, "Unable to allocate struct smsc95xx_priv");
+ netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n");
return -ENOMEM;
}
@@ -1066,8 +1061,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (pdata) {
- if (netif_msg_ifdown(dev))
- devdbg(dev, "free pdata");
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
dev->data[0] = 0;
@@ -1101,8 +1095,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
if (unlikely(header & RX_STS_ES_)) {
- if (netif_msg_rx_err(dev))
- devdbg(dev, "Error header=0x%08x", header);
+ netif_dbg(dev, rx_err, dev->net,
+ "Error header=0x%08x\n", header);
dev->net->stats.rx_errors++;
dev->net->stats.rx_dropped++;
@@ -1119,9 +1113,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (ETH_FRAME_LEN + 12))) {
- if (netif_msg_rx_err(dev))
- devdbg(dev, "size err header=0x%08x",
- header);
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
return 0;
}
@@ -1137,7 +1130,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!ax_skb)) {
- devwarn(dev, "Error allocating skb");
+ netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
@@ -1161,7 +1154,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (unlikely(skb->len < 0)) {
- devwarn(dev, "invalid rx length<0 %d", skb->len);
+ netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
return 0;
}
@@ -1197,9 +1190,21 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
}
if (csum) {
- u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
- skb_push(skb, 4);
- memcpy(skb->data, &csum_preamble, 4);
+ if (skb->len <= 45) {
+ /* workaround - hardware tx checksum does not work
+ * properly with extremely small packets */
+ long csstart = skb->csum_start - skb_headroom(skb);
+ __wsum calc = csum_partial(skb->data + csstart,
+ skb->len - csstart, 0);
+ *((__sum16 *)(skb->data + csstart
+ + skb->csum_offset)) = csum_fold(calc);
+
+ csum = false;
+ } else {
+ u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
+ skb_push(skb, 4);
+ memcpy(skb->data, &csum_preamble, 4);
+ }
}
skb_push(skb, 4);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 035fab04c0a0..7177abc78dc6 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -43,6 +43,7 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -242,13 +243,13 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
- if (netif_msg_rx_status (dev))
- devdbg (dev, "< rx, len %zu, type 0x%x",
- skb->len + sizeof (struct ethhdr), skb->protocol);
+ netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof (struct ethhdr), skb->protocol);
memset (skb->cb, 0, sizeof (struct skb_data));
status = netif_rx (skb);
- if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
- devdbg (dev, "netif_rx status %d", status);
+ if (status != NET_RX_SUCCESS)
+ netif_dbg(dev, rx_err, dev->net,
+ "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
@@ -313,9 +314,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
if (!schedule_work (&dev->kevent))
- deverr (dev, "kevent %d may have been dropped", work);
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
else
- devdbg (dev, "kevent %d scheduled", work);
+ netdev_dbg(dev->net, "kevent %d scheduled\n", work);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -332,8 +333,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
size_t size = dev->rx_urb_size;
if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
- if (netif_msg_rx_err (dev))
- devdbg (dev, "no rx skb");
+ netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
return;
@@ -363,21 +363,19 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
break;
case -ENODEV:
- if (netif_msg_ifdown (dev))
- devdbg (dev, "device gone");
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
netif_device_detach (dev->net);
break;
default:
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx submit, %d", retval);
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", retval);
tasklet_schedule (&dev->bh);
break;
case 0:
__skb_queue_tail (&dev->rxq, skb);
}
} else {
- if (netif_msg_ifdown (dev))
- devdbg (dev, "rx: stopped");
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
retval = -ENOLINK;
}
spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
@@ -400,8 +398,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (skb->len)
usbnet_skb_return (dev, skb);
else {
- if (netif_msg_rx_err (dev))
- devdbg (dev, "drop");
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
error:
dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
@@ -428,8 +425,8 @@ static void rx_complete (struct urb *urb)
entry->state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx length %d", skb->len);
+ netif_dbg(dev, rx_err, dev->net,
+ "rx length %d\n", skb->len);
}
break;
@@ -446,8 +443,8 @@ static void rx_complete (struct urb *urb)
/* software-driven interface shutdown */
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware gone */
- if (netif_msg_ifdown (dev))
- devdbg (dev, "rx shutdown, code %d", urb_status);
+ netif_dbg(dev, ifdown, dev->net,
+ "rx shutdown, code %d\n", urb_status);
goto block;
/* we get controller i/o faults during khubd disconnect() delays.
@@ -460,8 +457,8 @@ static void rx_complete (struct urb *urb)
dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
- if (netif_msg_link (dev))
- devdbg (dev, "rx throttle %d", urb_status);
+ netif_dbg(dev, link, dev->net,
+ "rx throttle %d\n", urb_status);
}
block:
entry->state = rx_cleanup;
@@ -477,8 +474,7 @@ block:
default:
entry->state = rx_cleanup;
dev->net->stats.rx_errors++;
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx status %d", urb_status);
+ netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
break;
}
@@ -492,8 +488,7 @@ block:
}
usb_free_urb (urb);
}
- if (netif_msg_rx_err (dev))
- devdbg (dev, "no read resubmitted");
+ netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
static void intr_complete (struct urb *urb)
@@ -510,15 +505,15 @@ static void intr_complete (struct urb *urb)
/* software-driven interface shutdown */
case -ENOENT: /* urb killed */
case -ESHUTDOWN: /* hardware gone */
- if (netif_msg_ifdown (dev))
- devdbg (dev, "intr shutdown, code %d", status);
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
return;
/* NOTE: not throttling like RX/TX, since this endpoint
* already polls infrequently
*/
default:
- devdbg (dev, "intr status %d", status);
+ netdev_dbg(dev->net, "intr status %d\n", status);
break;
}
@@ -527,8 +522,9 @@ static void intr_complete (struct urb *urb)
memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status != 0 && netif_msg_timer (dev))
- deverr(dev, "intr resubmit --> %d", status);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
}
/*-------------------------------------------------------------------------*/
@@ -536,8 +532,7 @@ void usbnet_pause_rx(struct usbnet *dev)
{
set_bit(EVENT_RX_PAUSED, &dev->flags);
- if (netif_msg_rx_status(dev))
- devdbg(dev, "paused rx queue enabled");
+ netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);
@@ -555,8 +550,8 @@ void usbnet_resume_rx(struct usbnet *dev)
tasklet_schedule(&dev->bh);
- if (netif_msg_rx_status(dev))
- devdbg(dev, "paused rx queue disabled, %d skbs requeued", num);
+ netif_dbg(dev, rx_status, dev->net,
+ "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);
@@ -589,7 +584,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
if (retval != -EINPROGRESS && retval != 0)
- devdbg (dev, "unlink urb err, %d", retval);
+ netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
}
@@ -631,9 +626,8 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
&& !skb_queue_empty(&dev->done)) {
schedule_timeout(UNLINK_TIMEOUT_MS);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (netif_msg_ifdown(dev))
- devdbg(dev, "waited for %d urb completions",
- temp);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
dev->wait = NULL;
@@ -648,22 +642,21 @@ int usbnet_stop (struct net_device *net)
netif_stop_queue (net);
- if (netif_msg_ifdown (dev))
- devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- net->stats.rx_packets, net->stats.tx_packets,
- net->stats.rx_errors, net->stats.tx_errors
- );
+ netif_info(dev, ifdown, dev->net,
+ "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
retval = info->stop(dev);
- if (retval < 0 && netif_msg_ifdown(dev))
- devinfo(dev,
- "stop fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ if (retval < 0)
+ netif_info(dev, ifdown, dev->net,
+ "stop fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ info->description);
}
if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
@@ -702,30 +695,29 @@ int usbnet_open (struct net_device *net)
struct driver_info *info = dev->driver_info;
if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
- if (netif_msg_ifup (dev))
- devinfo (dev,
- "resumption fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netif_info(dev, ifup, dev->net,
+ "resumption fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
goto done_nopm;
}
// put into "known safe" state
if (info->reset && (retval = info->reset (dev)) < 0) {
- if (netif_msg_ifup (dev))
- devinfo (dev,
- "open reset fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netif_info(dev, ifup, dev->net,
+ "open reset fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
goto done;
}
// insist peer be connected
if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
- if (netif_msg_ifup (dev))
- devdbg (dev, "can't open; %d", retval);
+ netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
goto done;
}
@@ -733,34 +725,23 @@ int usbnet_open (struct net_device *net)
if (dev->interrupt) {
retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
if (retval < 0) {
- if (netif_msg_ifup (dev))
- deverr (dev, "intr submit %d", retval);
+ netif_err(dev, ifup, dev->net,
+ "intr submit %d\n", retval);
goto done;
}
}
netif_start_queue (net);
- if (netif_msg_ifup (dev)) {
- char *framing;
-
- if (dev->driver_info->flags & FLAG_FRAMING_NC)
- framing = "NetChip";
- else if (dev->driver_info->flags & FLAG_FRAMING_GL)
- framing = "GeneSys";
- else if (dev->driver_info->flags & FLAG_FRAMING_Z)
- framing = "Zaurus";
- else if (dev->driver_info->flags & FLAG_FRAMING_RN)
- framing = "RNDIS";
- else if (dev->driver_info->flags & FLAG_FRAMING_AX)
- framing = "ASIX";
- else
- framing = "simple";
-
- devinfo (dev, "open: enable queueing "
- "(rx %d, tx %d) mtu %d %s framing",
- (int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
- framing);
- }
+ netif_info(dev, ifup, dev->net,
+ "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
+ (int)RX_QLEN(dev), (int)TX_QLEN(dev),
+ dev->net->mtu,
+ (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
+ (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
+ (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
+ (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
+ (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
+ "simple");
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
@@ -771,6 +752,7 @@ int usbnet_open (struct net_device *net)
usb_autopm_put_interface(dev->intf);
}
return retval;
+
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -908,8 +890,8 @@ kevent (struct work_struct *work)
status != -ESHUTDOWN) {
if (netif_msg_tx_err (dev))
fail_pipe:
- deverr (dev, "can't clear tx halt, status %d",
- status);
+ netdev_err(dev->net, "can't clear tx halt, status %d\n",
+ status);
} else {
clear_bit (EVENT_TX_HALT, &dev->flags);
if (status != -ESHUTDOWN)
@@ -928,8 +910,8 @@ fail_pipe:
status != -ESHUTDOWN) {
if (netif_msg_rx_err (dev))
fail_halt:
- deverr (dev, "can't clear rx halt, status %d",
- status);
+ netdev_err(dev->net, "can't clear rx halt, status %d\n",
+ status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
tasklet_schedule (&dev->bh);
@@ -967,18 +949,18 @@ fail_lowmem:
if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
usb_autopm_put_interface(dev->intf);
skip_reset:
- devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
} else {
usb_autopm_put_interface(dev->intf);
}
}
if (dev->flags)
- devdbg (dev, "kevent done, flags = 0x%lx",
- dev->flags);
+ netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
/*-------------------------------------------------------------------------*/
@@ -1014,15 +996,14 @@ static void tx_complete (struct urb *urb)
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay,
jiffies + THROTTLE_JIFFIES);
- if (netif_msg_link (dev))
- devdbg (dev, "tx throttle %d",
- urb->status);
+ netif_dbg(dev, link, dev->net,
+ "tx throttle %d\n", urb->status);
}
netif_stop_queue (dev->net);
break;
default:
- if (netif_msg_tx_err (dev))
- devdbg (dev, "tx err %d", entry->urb->status);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx err %d\n", entry->urb->status);
break;
}
}
@@ -1064,16 +1045,14 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "can't tx_fixup skb");
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
}
}
length = skb->len;
if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "no urb");
+ netif_dbg(dev, tx_err, dev->net, "no urb\n");
goto drop;
}
@@ -1113,7 +1092,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
/* no use to process more packets */
netif_stop_queue(net);
spin_unlock_irqrestore(&dev->txq.lock, flags);
- devdbg(dev, "Delaying transmission for resumption");
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
goto deferred;
}
#endif
@@ -1126,8 +1105,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
break;
default:
usb_autopm_put_interface_async(dev->intf);
- if (netif_msg_tx_err (dev))
- devdbg (dev, "tx: submit urb err %d", retval);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx: submit urb err %d\n", retval);
break;
case 0:
net->trans_start = jiffies;
@@ -1138,17 +1117,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore (&dev->txq.lock, flags);
if (retval) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "drop, code %d", retval);
+ netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
- } else if (netif_msg_tx_queued (dev)) {
- devdbg (dev, "> tx, len %d, type 0x%x",
- length, skb->protocol);
- }
+ } else
+ netif_dbg(dev, tx_queued, dev->net,
+ "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
@@ -1179,7 +1156,7 @@ static void usbnet_bh (unsigned long param)
dev_kfree_skb (skb);
continue;
default:
- devdbg (dev, "bogus skb state %d", entry->state);
+ netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
}
}
@@ -1207,9 +1184,10 @@ static void usbnet_bh (unsigned long param)
if (urb != NULL)
rx_submit (dev, urb, GFP_ATOMIC);
}
- if (temp != dev->rxq.qlen && netif_msg_link (dev))
- devdbg (dev, "rxqlen %d --> %d",
- temp, dev->rxq.qlen);
+ if (temp != dev->rxq.qlen)
+ netif_dbg(dev, link, dev->net,
+ "rxqlen %d --> %d\n",
+ temp, dev->rxq.qlen);
if (dev->rxq.qlen < qlen)
tasklet_schedule (&dev->bh);
}
@@ -1240,11 +1218,10 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev = interface_to_usbdev (intf);
- if (netif_msg_probe (dev))
- devinfo (dev, "unregister '%s' usb-%s-%s, %s",
- intf->dev.driver->name,
- xdev->bus->bus_name, xdev->devpath,
- dev->driver_info->description);
+ netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
+ intf->dev.driver->name,
+ xdev->bus->bus_name, xdev->devpath,
+ dev->driver_info->description);
net = dev->net;
unregister_netdev (net);
@@ -1407,12 +1384,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = register_netdev (net);
if (status)
goto out3;
- if (netif_msg_probe (dev))
- devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
- udev->dev.driver->name,
- xdev->bus->bus_name, xdev->devpath,
- dev->driver_info->description,
- net->dev_addr);
+ netif_info(dev, probe, dev->net,
+ "register '%s' at usb-%s-%s, %s, %pM\n",
+ udev->dev.driver->name,
+ xdev->bus->bus_name, xdev->devpath,
+ dev->driver_info->description,
+ net->dev_addr);
// ok, it's ready to go.
usb_set_intfdata (udev, dev);
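[editor's note] The usbnet hunks above convert the old devdbg()/devinfo()/deverr() calls to the netif_dbg()/netif_info()/netif_err() helpers, which test the driver's msg_enable bitmap (NETIF_MSG_IFUP, NETIF_MSG_TX_ERR, ...) before printing. A minimal sketch of that pattern for a hypothetical driver -- the struct and function names below are illustrative, not from this patch:

    #include <linux/netdevice.h>

    /* Hypothetical private data; only msg_enable matters for the macros. */
    struct foo_priv {
            struct net_device *netdev;
            u32 msg_enable;         /* NETIF_MSG_* bitmap, see netdevice.h */
    };

    static int foo_open(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            /* Printed only if NETIF_MSG_IFUP is set in priv->msg_enable. */
            netif_info(priv, ifup, netdev, "opening, mtu %d\n", netdev->mtu);

            /* netif_dbg() additionally compiles out unless DEBUG or
             * dynamic debug is enabled. */
            netif_dbg(priv, ifup, netdev, "queues started\n");
            return 0;
    }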
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3a15de56df9c..f9f0730b53d5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -9,6 +9,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
@@ -34,7 +35,7 @@ struct veth_net_stats {
struct veth_priv {
struct net_device *peer;
- struct veth_net_stats *stats;
+ struct veth_net_stats __percpu *stats;
unsigned ip_summed;
};
@@ -263,7 +264,7 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- struct veth_net_stats *stats;
+ struct veth_net_stats __percpu *stats;
struct veth_priv *priv;
stats = alloc_percpu(struct veth_net_stats);
@@ -333,19 +334,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
struct veth_priv *priv;
char ifname[IFNAMSIZ];
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
+ struct ifinfomsg *ifmp;
struct net *net;
/*
* create and register peer first
- *
- * struct ifinfomsg is at the head of VETH_INFO_PEER, but we
- * skip it since no info from it is useful yet
*/
-
if (data != NULL && data[VETH_INFO_PEER] != NULL) {
struct nlattr *nla_peer;
nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
err = nla_parse(peer_tb, IFLA_MAX,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
nla_len(nla_peer) - sizeof(struct ifinfomsg),
@@ -358,8 +357,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return err;
tbp = peer_tb;
- } else
+ } else {
+ ifmp = NULL;
tbp = tb;
+ }
if (tbp[IFLA_IFNAME])
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
@@ -387,6 +388,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
netif_carrier_off(peer);
+ err = rtnl_configure_link(peer, ifmp);
+ if (err < 0)
+ goto err_configure_peer;
+
/*
* register dev last
*
@@ -428,6 +433,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
err_alloc_name:
+err_configure_peer:
unregister_netdevice(peer);
return err;
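[editor's note] veth switches its counters to a __percpu allocation: each CPU bumps its own copy locklessly, and the totals are folded on demand. A rough sketch of that pattern using the same kernel APIs; the struct and helpers here are illustrative, and the update path is assumed to run with preemption disabled (e.g. the xmit path), as in veth:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/string.h>

    struct foo_stats {
            unsigned long rx_packets;
            unsigned long rx_bytes;
    };

    static struct foo_stats __percpu *foo_stats;

    static int foo_stats_init(void)
    {
            foo_stats = alloc_percpu(struct foo_stats);
            return foo_stats ? 0 : -ENOMEM;
    }

    static void foo_count_rx(unsigned int len)
    {
            /* Each CPU touches only its own copy: no lock needed. */
            struct foo_stats *s = per_cpu_ptr(foo_stats, smp_processor_id());

            s->rx_packets++;
            s->rx_bytes += len;
    }

    static void foo_fold_stats(struct foo_stats *sum)
    {
            int cpu;

            memset(sum, 0, sizeof(*sum));
            for_each_possible_cpu(cpu) {
                    struct foo_stats *s = per_cpu_ptr(foo_stats, cpu);

                    sum->rx_packets += s->rx_packets;
                    sum->rx_bytes += s->rx_bytes;
            }
    }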
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f64e9b..388751aa66e0 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -89,7 +89,6 @@ static const int multicast_filter_limit = 32;
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -102,6 +101,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@@ -266,7 +266,7 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static const struct pci_device_id rhine_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
@@ -389,6 +389,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@@ -407,6 +408,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@@ -775,6 +777,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1183,18 @@ static int rhine_open(struct net_device *dev)
return 0;
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
-
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
- spin_lock(&rp->lock);
+ spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@@ -1206,7 +1206,7 @@ static void rhine_tx_timeout(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
- spin_unlock(&rp->lock);
+ spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@@ -1214,6 +1214,19 @@ static void rhine_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1683,7 +1696,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
@@ -1691,10 +1704,9 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x0C;
} else {
struct dev_mc_list *mclist;
- int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -1830,10 +1842,11 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- spin_lock_irq(&rp->lock);
-
- netif_stop_queue(dev);
napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
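[editor's note] via-rhine now defers its chip reset from the ndo_tx_timeout handler, which may run in atomic context, to a work item running in process context, and cancels that work on close. The shape of the pattern, reduced to the workqueue calls (names are hypothetical):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct foo_priv {
            struct net_device *dev;
            struct work_struct reset_task;
    };

    static void foo_reset_task(struct work_struct *work)
    {
            struct foo_priv *priv = container_of(work, struct foo_priv,
                                                 reset_task);

            /* Process context: safe to sleep while reinitializing
             * priv->dev here. */
    }

    static void foo_tx_timeout(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* Atomic context: just kick the worker. */
            schedule_work(&priv->reset_task);
    }

    static void foo_probe_setup(struct foo_priv *priv)
    {
            INIT_WORK(&priv->reset_task, foo_reset_task);
    }

    static void foo_close(struct foo_priv *priv)
    {
            /* Ensure no reset is still running or pending. */
            cancel_work_sync(&priv->reset_task);
    }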
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4ceb441f2687..bc278d4ee89d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
@@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr)
case FLOW_CNTL_TX_RX:
MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
@@ -1132,7 +1132,7 @@ static void velocity_set_multi(struct net_device *dev)
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit) ||
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
(dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
@@ -1141,9 +1141,11 @@ static void velocity_set_multi(struct net_device *dev)
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+ i++;
}
mac_set_cam_mask(regs, vptr->mCAMmask);
@@ -1877,13 +1879,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
* @vptr; Velocity
- * @status:
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
* necessary/
*/
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
{
struct tx_desc *td;
int qnum;
@@ -2090,14 +2091,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
- * @status: adapter status (unused)
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-static int velocity_rx_srv(struct velocity_info *vptr, int status,
- int budget_left)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
@@ -2151,32 +2150,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
struct velocity_info *vptr = container_of(napi,
struct velocity_info, napi);
unsigned int rx_done;
- u32 isr_status;
-
- spin_lock(&vptr->lock);
- isr_status = mac_read_isr(vptr->mac_regs);
-
- /* Ack the interrupt */
- mac_write_isr(vptr->mac_regs, isr_status);
- if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
- velocity_error(vptr, isr_status);
+ unsigned long flags;
+ spin_lock_irqsave(&vptr->lock, flags);
/*
* Do rx and tx twice for performance (taken from the VIA
* out-of-tree driver).
*/
- rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
- velocity_tx_srv(vptr, isr_status);
- rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
- velocity_tx_srv(vptr, isr_status);
-
- spin_unlock(&vptr->lock);
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
napi_complete(napi);
mac_enable_int(vptr->mac_regs);
}
+ spin_unlock_irqrestore(&vptr->lock, flags);
return rx_done;
}
@@ -2206,10 +2197,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
return IRQ_NONE;
}
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
+
if (likely(napi_schedule_prep(&vptr->napi))) {
mac_disable_int(vptr->mac_regs);
__napi_schedule(&vptr->napi);
}
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
spin_unlock(&vptr->lock);
return IRQ_HANDLED;
@@ -2237,8 +2235,6 @@ static int velocity_open(struct net_device *dev)
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2250,6 +2246,8 @@ static int velocity_open(struct net_device *dev)
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
napi_enable(&vptr->napi);
@@ -2339,10 +2337,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
@@ -2702,10 +2700,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
}
static u32 velocity_get_link(struct net_device *dev)
@@ -2829,7 +2825,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
ret = register_netdev(dev);
if (ret < 0)
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
velocity_init_registers(vptr, VELOCITY_INIT_WOL);
mac_disable_int(vptr->mac_regs);
- velocity_tx_srv(vptr, 0);
+ velocity_tx_srv(vptr);
for (i = 0; i < vptr->tx.numq; i++) {
if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
{
struct velocity_info *vptr = netdev_priv(dev);
int max_us = 0x3f * 64;
+ unsigned long flags;
/* 6 bits of */
if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
ecmd->tx_coalesce_usecs);
/* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
mac_disable_int(vptr->mac_regs);
setup_adaptive_interrupts(vptr);
setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
mac_clear_isr(vptr->mac_regs);
mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
}
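[editor's note] Both VIA drivers above drop the open-coded dev->mc_list walk in favour of netdev_mc_count() and netdev_for_each_mc_addr(). A standalone sketch of the same loop shape, mirroring the hunks; the hash filter layout and the limit of 32 are placeholders, and writing the result to hardware is omitted:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/crc32.h>

    static void foo_set_rx_mode(struct net_device *dev)
    {
            u32 mc_filter[2] = { 0, 0 };
            struct dev_mc_list *mclist;

            if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 32) {
                    /* Too many entries: accept all multicast frames. */
                    mc_filter[0] = mc_filter[1] = 0xffffffff;
            } else {
                    netdev_for_each_mc_addr(mclist, dev) {
                            int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                            mc_filter[bit >> 5] |= 1 << (bit & 31);
                    }
            }
            /* mc_filter[] would now be written to the hardware filter. */
    }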
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc3cb2e..b0577dd1a42d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -25,6 +25,7 @@
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
+#include <linux/slab.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -56,10 +57,6 @@ struct virtnet_info
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
- /* Receive & send queues. */
- struct sk_buff_head recv;
- struct sk_buff_head send;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -75,34 +72,44 @@ struct skb_vnet_hdr {
unsigned int num_sg;
};
+struct padded_vnet_hdr {
+ struct virtio_net_hdr hdr;
+ /*
+ * virtio_net_hdr should be in a separated sg buffer because of a
+ * QEMU bug, and data sg buffer shares same page with this header sg.
+ * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+ */
+ char padding[6];
+};
+
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct skb_vnet_hdr *)skb->cb;
}
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
- page->private = (unsigned long)vi->pages;
- vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * page->private is used to chain pages for big packets; put the whole
+ * most recently used list at the head for reuse
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
{
- unsigned int i;
+ struct page *end;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- give_a_page(vi, skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags = 0;
- skb->data_len = 0;
+ /* Find end of list, sew whole thing into vi->pages. */
+ for (end = page; end->private; end = (struct page *)end->private);
+ end->private = (unsigned long)vi->pages;
+ vi->pages = page;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
struct page *p = vi->pages;
- if (p)
+ if (p) {
vi->pages = (struct page *)p->private;
- else
+ /* clear private here; it is used to chain pages */
+ p->private = 0;
+ } else
p = alloc_page(gfp_mask);
return p;
}
@@ -118,99 +125,142 @@ static void skb_xmit_done(struct virtqueue *svq)
netif_wake_queue(vi->dev);
}
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
- unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int *len)
{
- struct virtnet_info *vi = netdev_priv(dev);
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
- int err;
- int i;
+ int i = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *f;
+
+ f = &skb_shinfo(skb)->frags[i];
+ f->size = min((unsigned)PAGE_SIZE - offset, *len);
+ f->page_offset = offset;
+ f->page = page;
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb_shinfo(skb)->nr_frags++;
+ *len -= f->size;
+}
- if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+ struct page *page, unsigned int len)
+{
+ struct sk_buff *skb;
+ struct skb_vnet_hdr *hdr;
+ unsigned int copy, hdr_len, offset;
+ char *p;
- if (vi->mergeable_rx_bufs) {
- unsigned int copy;
- char *p = page_address(skb_shinfo(skb)->frags[0].page);
+ p = page_address(page);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
-
- memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
- p += sizeof(hdr->mhdr);
+ /* copy small packet so we can reuse these pages for small data */
+ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ if (unlikely(!skb))
+ return NULL;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ hdr = skb_vnet_hdr(skb);
- memcpy(skb_put(skb, copy), p, copy);
+ if (vi->mergeable_rx_bufs) {
+ hdr_len = sizeof hdr->mhdr;
+ offset = hdr_len;
+ } else {
+ hdr_len = sizeof hdr->hdr;
+ offset = sizeof(struct padded_vnet_hdr);
+ }
- len -= copy;
+ memcpy(hdr, p, hdr_len);
- if (!len) {
- give_a_page(vi, skb_shinfo(skb)->frags[0].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(hdr->mhdr) + copy;
- skb_shinfo(skb)->frags[0].size = len;
- skb->data_len += len;
- skb->len += len;
- }
+ len -= hdr_len;
+ p += offset;
- while (--hdr->mhdr.num_buffers) {
- struct sk_buff *nskb;
+ copy = len;
+ if (copy > skb_tailroom(skb))
+ copy = skb_tailroom(skb);
+ memcpy(skb_put(skb, copy), p, copy);
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long %d\n", dev->name,
- len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ len -= copy;
+ offset += copy;
- nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
- if (!nskb) {
- pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, hdr->mhdr.num_buffers);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ while (len) {
+ set_skb_frag(skb, page, offset, &len);
+ page = (struct page *)page->private;
+ offset = 0;
+ }
- __skb_unlink(nskb, &vi->recv);
- vi->num--;
+ if (page)
+ give_pages(vi, page);
- skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
- skb_shinfo(nskb)->nr_frags = 0;
- kfree_skb(nskb);
+ return skb;
+}
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct page *page;
+ int num_buf, i, len;
+
+ num_buf = hdr->mhdr.num_buffers;
+ while (--num_buf) {
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long\n", skb->dev->name);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
- skb_shinfo(skb)->frags[i].size = len;
- skb_shinfo(skb)->nr_frags++;
- skb->data_len += len;
- skb->len += len;
+ page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ if (!page) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ skb->dev->name, hdr->mhdr.num_buffers);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
}
- } else {
- len -= sizeof(hdr->hdr);
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ set_skb_frag(skb, page, 0, &len);
- if (len <= MAX_PACKET_LEN)
- trim_pages(vi, skb);
+ --vi->num;
+ }
+ return 0;
+}
- err = pskb_trim(skb, len);
- if (err) {
- pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
- len, err);
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ struct skb_vnet_hdr *hdr;
+
+ if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ dev->stats.rx_length_errors++;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ return;
+ }
+
+ if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+ skb = buf;
+ len -= sizeof(struct virtio_net_hdr);
+ skb_trim(skb, len);
+ } else {
+ page = buf;
+ skb = page_to_skb(vi, page, len);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- goto drop;
+ give_pages(vi, page);
+ return;
}
+ if (vi->mergeable_rx_bufs)
+ if (receive_mergeable(vi, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
}
+ hdr = skb_vnet_hdr(skb);
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -267,110 +317,121 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
frame_err:
dev->stats.rx_frame_errors++;
-drop:
dev_kfree_skb(skb);
}
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
- struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err, i;
- bool oom = false;
-
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
- do {
- struct skb_vnet_hdr *hdr;
+ struct skb_vnet_hdr *hdr;
+ struct scatterlist sg[2];
+ int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ sg_init_table(sg, 2);
+ skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ if (unlikely(!skb))
+ return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, MAX_PACKET_LEN);
- hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ hdr = skb_vnet_hdr(skb);
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
- if (vi->big_packets) {
- for (i = 0; i < MAX_SKB_FRAGS; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- f->page = get_a_page(vi, gfp);
- if (!f->page)
- break;
+ skb_to_sgvec(skb, sg + 1, 0, skb->len);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ if (err < 0)
+ dev_kfree_skb(skb);
- skb->data_len += PAGE_SIZE;
- skb->len += PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
- }
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+ struct page *first, *list = NULL;
+ char *p;
+ int i, err, offset;
+
+ sg_init_table(sg, MAX_SKB_FRAGS + 2);
+ /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ if (list)
+ give_pages(vi, list);
+ return -ENOMEM;
}
+ sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- skb_queue_head(&vi->recv, skb);
+ /* chain new page in list head to match sg */
+ first->private = (unsigned long)list;
+ list = first;
+ }
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
- if (err < 0) {
- skb_unlink(skb, &vi->recv);
- trim_pages(vi, skb);
- kfree_skb(skb);
- break;
- }
- vi->num++;
- } while (err >= num);
- if (unlikely(vi->num > vi->max))
- vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
- return !oom;
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ give_pages(vi, list);
+ return -ENOMEM;
+ }
+ p = page_address(first);
+
+ /* sg[0], sg[1] share the same page */
+ /* a separate sg[0] for virtio_net_hdr only, due to a QEMU bug */
+ sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+
+ /* sg[1] for data packet, from offset */
+ offset = sizeof(struct padded_vnet_hdr);
+ sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+
+ /* chain first in list head */
+ first->private = (unsigned long)list;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ first);
+ if (err < 0)
+ give_pages(vi, first);
+
+ return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
- struct sk_buff *skb;
- struct scatterlist sg[1];
+ struct page *page;
+ struct scatterlist sg;
int err;
- bool oom = false;
-
- if (!vi->mergeable_rx_bufs)
- return try_fill_recv_maxbufs(vi, gfp);
- do {
- skb_frag_t *f;
+ page = get_a_page(vi, gfp);
+ if (!page)
+ return -ENOMEM;
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ sg_init_one(&sg, page_address(page), PAGE_SIZE);
- f = &skb_shinfo(skb)->frags[0];
- f->page = get_a_page(vi, gfp);
- if (!f->page) {
- oom = true;
- kfree_skb(skb);
- break;
- }
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ if (err < 0)
+ give_pages(vi, page);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+ int err;
+ bool oom = false;
- sg_init_one(sg, page_address(f->page), PAGE_SIZE);
- skb_queue_head(&vi->recv, skb);
+ do {
+ if (vi->mergeable_rx_bufs)
+ err = add_recvbuf_mergeable(vi, gfp);
+ else if (vi->big_packets)
+ err = add_recvbuf_big(vi, gfp);
+ else
+ err = add_recvbuf_small(vi, gfp);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err < 0) {
- skb_unlink(skb, &vi->recv);
- kfree_skb(skb);
+ oom = true;
break;
}
- vi->num++;
+ ++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
@@ -395,8 +456,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
- try_fill_recv(vi, GFP_KERNEL);
- still_empty = (vi->num == 0);
+ still_empty = !try_fill_recv(vi, GFP_KERNEL);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
@@ -408,15 +468,14 @@ static void refill_work(struct work_struct *work)
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
- struct sk_buff *skb = NULL;
+ void *buf;
unsigned int len, received = 0;
again:
while (received < budget &&
- (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
- __skb_unlink(skb, &vi->recv);
- receive_skb(vi->dev, skb, len);
- vi->num--;
+ (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ receive_buf(vi->dev, buf, len);
+ --vi->num;
received++;
}
@@ -446,7 +505,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- __skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -496,9 +554,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -529,15 +587,6 @@ again:
}
vi->svq->vq_ops->kick(vi->svq);
- /*
- * Put new one in send queue. You'd expect we'd need this before
- * xmit_skb calls add_buf(), since the callback can be triggered
- * immediately after that. But since the callback just triggers
- * another call back here, normal network xmit locking prevents the
- * race.
- */
- __skb_queue_head(&vi->send, skb);
-
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
nf_reset(skb);
@@ -675,6 +724,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
+ int uc_count;
+ int mc_count;
void *buf;
int i;
@@ -701,9 +752,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
+ uc_count = netdev_uc_count(dev);
+ mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
- (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+ (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ mac_data = buf;
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
@@ -712,24 +766,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = dev->uc.count;
+ mac_data->entries = uc_count;
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
- sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+ sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
/* multicast list and count fill the end */
- mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+ mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = dev->mc_count;
- addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, addr = addr->next)
- memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
+ mac_data->entries = mc_count;
+ i = 0;
+ netdev_for_each_mc_addr(addr, dev)
+ memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
- sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -916,10 +970,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
- /* Initialize our empty receive and send queues. */
- skb_queue_head_init(&vi->recv);
- skb_queue_head_init(&vi->send);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -952,26 +1002,42 @@ free:
return err;
}
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+ void *buf;
+ while (1) {
+ buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ if (!buf)
+ break;
+ dev_kfree_skb(buf);
+ }
+ while (1) {
+ buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ if (!buf)
+ break;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ --vi->num;
+ }
+ BUG_ON(vi->num != 0);
+}
+
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- struct sk_buff *skb;
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- /* Free our skbs in send and recv queues, if any. */
- while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
- kfree_skb(skb);
- vi->num--;
- }
- __skb_queue_purge(&vi->send);
-
- BUG_ON(vi->num != 0);
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
+ /* Free unused buffers in both send and recv, if any. */
+ free_unused_bufs(vi);
+
vdev->config->del_vqs(vi->vdev);
while (vi->pages)
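[editor's note] virtio_net now queues raw pages rather than pre-built skbs and strings spare pages together through page->private, as in the give_pages()/get_a_page() hunks above. The chaining trick in isolation -- a sketch with the device context stripped out, no locking shown, and "pool" standing in for vi->pages:

    #include <linux/mm.h>
    #include <linux/gfp.h>

    static struct page *pool;       /* head of the chained free list */

    /* Return a chain of pages to the pool; page->private links them. */
    static void give_pages(struct page *page)
    {
            struct page *end;

            for (end = page; end->private; end = (struct page *)end->private)
                    ;
            end->private = (unsigned long)pool;
            pool = page;
    }

    static struct page *get_a_page(gfp_t gfp)
    {
            struct page *p = pool;

            if (p) {
                    pool = (struct page *)p->private;
                    p->private = 0;         /* unlink before handing it out */
            } else {
                    p = alloc_page(gfp);
            }
            return p;
    }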
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d77..cff3485d9673 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
* PCI Device ID Table
* Last entry must be all 0s
*/
-static const struct pci_device_id vmxnet3_pciid_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
@@ -1668,22 +1668,19 @@ static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
- u32 sz = netdev->mc_count * ETH_ALEN;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- int i;
- struct dev_mc_list *mc = netdev->mc_list;
+ struct dev_mc_list *mc;
+ int i = 0;
- for (i = 0; i < netdev->mc_count; i++) {
- BUG_ON(!mc);
- memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(mc, netdev)
+ memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
- mc = mc->next;
- }
}
}
return buf;
@@ -1708,12 +1705,12 @@ vmxnet3_set_mc(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
- netdev->mc_count * ETH_ALEN);
+ netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 32a75fa935ed..a21a25d218b6 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
#include "vxge-traffic.h"
#include "vxge-config.h"
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e7877df092f3..13f5416307f8 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -14,6 +14,7 @@
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
+#include <linux/slab.h>
#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index c6736b972635..aaf374cfd322 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -12,6 +12,7 @@
* Copyright(c) 2002-2009 Neterion Inc.
******************************************************************************/
#include<linux/ethtool.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a1e867..ba6d0da78c30 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -43,6 +43,7 @@
#include <linux/if_vlan.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
@@ -54,7 +55,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
"Virtualized Server Adapter");
-static struct pci_device_id vxge_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -310,7 +311,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
rx_priv->data_size, PCI_DMA_FROMDEVICE);
- if (dma_addr == 0) {
+ if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
ring->stats.pci_map_fail++;
return -EIO;
}
@@ -1178,11 +1179,11 @@ static void vxge_set_multicast(struct net_device *dev)
memset(&mac_info, 0, sizeof(struct macInfo));
/* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && dev->mc_count) {
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
list_head = &vdev->vpaths[0].mac_addr_list;
- if ((dev->mc_count +
+ if ((netdev_mc_count(dev) +
(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
vdev->vpaths[0].max_mac_addr_cnt)
goto _set_all_mcast;
@@ -1217,9 +1218,7 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
@@ -4087,21 +4086,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
high_dma = 1;
if (pci_set_consistent_dma_mask(pdev,
- 0xffffffffffffffffULL)) {
+ DMA_BIT_MASK(64))) {
vxge_debug_init(VXGE_ERR,
"%s : unable to obtain 64bit DMA for "
"consistent allocations", __func__);
ret = -ENOMEM;
goto _exit1;
}
- } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 32bit DMA", __func__);
} else {
@@ -4297,10 +4296,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
vdev->ndev->name, ll_config.device_hw_info.product_desc);
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
- vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
- macaddr[3], macaddr[4], macaddr[5]);
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
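[editor's note] The vxge probe path replaces the literal 0xffffffffffffffffULL / 0xffffffffUL masks with DMA_BIT_MASK(64) and DMA_BIT_MASK(32). A condensed version of the usual 64-then-32 fallback, with the driver's logging and flags trimmed (function name is a placeholder):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int foo_setup_dma(struct pci_dev *pdev)
    {
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
                !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
                    return 0;               /* full 64-bit DMA */

            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
                !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                    return 0;               /* fall back to 32-bit DMA */

            return -EIO;                    /* no usable DMA configuration */
    }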
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b36bf96eb502..f0bd70fb650c 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -811,7 +811,7 @@ static ssize_t cosa_read(struct file *file,
cosa_enable_rx(chan);
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->rxwaitq, &wait);
- while(!chan->rx_status) {
+ while (!chan->rx_status) {
current->state = TASK_INTERRUPTIBLE;
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
@@ -896,7 +896,7 @@ static ssize_t cosa_write(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->txwaitq, &wait);
- while(!chan->tx_status) {
+ while (!chan->tx_status) {
current->state = TASK_INTERRUPTIBLE;
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
@@ -1153,7 +1153,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
struct channel_data *channel, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
- switch(cmd) {
+ switch (cmd) {
case COSAIORSET: /* Reset the device */
if (!capable(CAP_NET_ADMIN))
return -EACCES;
@@ -1704,7 +1704,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
spin_unlock_irqrestore(&cosa->lock, flags);
return;
}
- while(1) {
+ while (1) {
cosa->txchan++;
i++;
if (cosa->txchan >= cosa->nchannels)
@@ -2010,7 +2010,7 @@ again:
static void debug_status_in(struct cosa_data *cosa, int status)
{
char *s;
- switch(status & SR_CMD_FROM_SRP_MASK) {
+ switch (status & SR_CMD_FROM_SRP_MASK) {
case SR_UP_REQUEST:
s = "RX_REQ";
break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca4..a4859f7a7cc0 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -89,6 +89,7 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <asm/system.h>
#include <asm/cache.h>
@@ -2050,7 +2051,7 @@ static int __init dscc4_setup(char *str)
__setup("dscc4.setup=", dscc4_setup);
#endif
-static struct pci_device_id dscc4_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
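[editor's note] Many of the WAN drivers in this series switch their ID tables to DEFINE_PCI_DEVICE_TABLE(), which declares the table as const struct pci_device_id (and, at the time, carried the __devinitconst annotation). A minimal sketch of an ID table plus the module alias export; the vendor/device IDs and driver name are placeholders:

    #include <linux/pci.h>
    #include <linux/module.h>

    static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
            { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder IDs */
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

    static struct pci_driver foo_driver = {
            .name           = "foo",
            .id_table       = foo_pci_tbl,
            /* .probe and .remove would go here */
    };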
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e3649157..e087b9a6daaa 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -20,6 +20,7 @@
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/if.h>
@@ -528,7 +529,7 @@ static int fst_debug_mask = { FST_DEBUG };
/*
* PCI ID lookup table
*/
-static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 80114c93bae7..4dde2ea4a189 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -37,7 +37,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 84f01373e11f..aad9ed45c254 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -37,7 +37,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index f1bff98acd1f..ee7083fbea50 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -20,7 +20,6 @@
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#undef DEBUG_HARD_HEADER
@@ -141,7 +140,7 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
data->address != CISCO_UNICAST)
return cpu_to_be16(ETH_P_HDLC);
- switch(data->protocol) {
+ switch (data->protocol) {
case cpu_to_be16(ETH_P_IP):
case cpu_to_be16(ETH_P_IPX):
case cpu_to_be16(ETH_P_IPV6):
@@ -190,7 +189,7 @@ static int cisco_rx(struct sk_buff *skb)
cisco_data = (struct cisco_packet*)(skb->data + sizeof
(struct hdlc_header));
- switch(ntohl (cisco_data->type)) {
+ switch (ntohl (cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
in_dev = dev->ip_ptr;
addr = 0;
@@ -245,8 +244,8 @@ static int cisco_rx(struct sk_buff *skb)
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
- } /* switch(keepalive type) */
- } /* switch(protocol) */
+ } /* switch (keepalive type) */
+ } /* switch (protocol) */
printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
ntohs(data->protocol));
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b9b9d6b01c0b..941f053e650e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}
+static void ppp_close(struct net_device *dev)
+{
+ ppp_tx_flush();
+}
+
static struct hdlc_proto proto = {
.start = ppp_start,
.stop = ppp_stop,
+ .close = ppp_close,
.type_trans = ppp_type_trans,
.ioctl = ppp_ioctl,
.netif_rx = ppp_rx,
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 19f51fdd5522..5dc153e8a29d 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -20,7 +20,6 @@
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 1b30fcc24145..05c9b0b96239 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
+#include <linux/gfp.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
@@ -21,7 +22,6 @@
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index aa9248f8eb1a..c7adbb79f7cc 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -10,6 +10,7 @@
*/
#include <linux/errno.h>
+#include <linux/gfp.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
@@ -21,7 +22,6 @@
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <net/x25device.h>
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
@@ -202,10 +202,10 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_X25:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(dev->flags & IFF_UP)
+ if (dev->flags & IFF_UP)
return -EBUSY;
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 74164d29524c..48edc5f4dac8 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
+#include <linux/slab.h>
#include <net/arp.h>
#include <asm/irq.h>
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index c705046d8615..0c2cdde686a0 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
+#include <linux/slab.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index d1e3c673e9b2..98e2f99903d7 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c820..b27850377121 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
static int LMC_PKT_BUF_SZ = 1542;
-static struct pci_device_id lmc_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
PCI_VENDOR_ID_LMC, PCI_ANY_ID },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index f327674fc93a..5920c996fcdf 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -6,7 +6,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 044a48175c42..f600075e84a2 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -25,7 +25,6 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d3955420..3f744c643094 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -228,6 +228,7 @@ static char rcsid[] =
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/if.h>
+#include <linux/slab.h>
#include <net/arp.h>
#include <asm/io.h>
@@ -251,7 +252,7 @@ static char rcsid[] =
#undef PC300_DEBUG_RX
#undef PC300_DEBUG_OTHER
-static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
/* PC300/RSV or PC300/X21, 2 chan */
{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
/* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd94..c7ab3becd261 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf022..e2cff64a446a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 25477b5cde47..cff13a9597cd 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -43,7 +43,6 @@
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 61249f489e37..e91457d6023e 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -23,6 +23,7 @@
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <net/arp.h>
#include <asm/irq.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624ee..541c700dceef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index b9f520b7db6a..80d5c5834a0b 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
+#include <linux/slab.h>
#include "x25_asy.h"
#include <net/x25device.h>
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 0be7ec7299db..fbf5e843d48c 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -47,6 +47,7 @@
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 944945540391..6180772dcc09 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -76,6 +76,7 @@
#include <stdarg.h>
#include "i2400m.h"
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/wimax/i2400m.h>
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09de..94dc83c3969d 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -69,6 +69,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/suspend.h>
+#include <linux/slab.h>
#define D_SUBMODULE driver
#include "debug-levels.h"
@@ -301,24 +302,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
/* Extract MAC addresss */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
- d_printf(2, dev, "GET DEVICE INFO: mac addr "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
if (!memcmp(net_dev->perm_addr, ddi->mac_address,
sizeof(ddi->mac_address)))
goto ok;
dev_warn(dev, "warning: device reports a different MAC address "
"to that of boot mode's\n");
- dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
- dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
- net_dev->perm_addr[0], net_dev->perm_addr[1],
- net_dev->perm_addr[2], net_dev->perm_addr[3],
- net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299ca..3f283bff0ff7 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -156,6 +156,7 @@
*/
#include <linux/firmware.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m.h"
@@ -612,7 +613,7 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
goto error_wait_for_ack;
}
rx_bytes = result;
- /* verify the ack and read more if neccessary [result is the
+ /* verify the ack and read more if necessary [result is the
* final amount of bytes we get in the ack] */
result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
if (result < 0)
@@ -1041,21 +1042,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
- d_printf(2, dev,
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ "mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f279417e..2d7c96d7e865 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
};
@@ -234,6 +235,7 @@ struct i2400mu {
u8 rx_size_auto_shrink;
struct dentry *debugfs_dentry;
+ unsigned i6050:1; /* 1 if this is a 6050 based SKU */
};
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 04df9bbe340f..820b128705ec 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -627,7 +627,7 @@ enum i2400m_bm_cmd_flags {
* @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed
* directly to wait for a reboot barker from the device.
* @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
- * rom after reading the MAC adress. This is quite a dirty hack,
+ * rom after reading the MAC address. This is quite a dirty hack,
* if you ask me -- the device requires the bootrom to be
 * initialized after reading the MAC address.
*/
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 599aa4eb9baa..b811c2f1f5e9 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -73,6 +73,7 @@
* alloc_netdev.
*/
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 43927b5d7ad6..035e4cf3e6ed 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -34,6 +34,7 @@
*/
#include "i2400m.h"
#include <linux/wimax/i2400m.h>
+#include <linux/slab.h>
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 7ddb173fd4a7..fa2e11e5b4b9 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -144,6 +144,7 @@
* i2400m_msg_size_check
* wimax_msg
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 8adf6c9b6f8f..d619da33f20b 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -65,6 +65,7 @@
#include <linux/skbuff.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/slab.h>
#include "i2400m-sdio.h"
#define D_SUBMODULE rx
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 76a50ac02ebb..7632f80954e3 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -48,6 +48,7 @@
* __i2400ms_send_barker()
*/
+#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
@@ -304,7 +305,7 @@ error_kzalloc:
*
* The device will be fully reset internally, but won't be
* disconnected from the bus (so no reenumeration will
- * happen). Firmware upload will be neccessary.
+ * happen). Firmware upload will be necessary.
*
* The device will send a reboot barker that will trigger the driver
* to reinitialize the state via __i2400m_dev_reset_handle.
@@ -314,7 +315,7 @@ error_kzalloc:
*
* The device will be fully reset internally, disconnected from the
* bus an a reenumeration will happen. Firmware upload will be
- * neccessary. Thus, we don't do any locking or struct
+ * necessary. Thus, we don't do any locking or struct
* reinitialization, as we are going to be fully disconnected and
* reenumerated.
*
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 54480e8947f1..b0cb90624cf6 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -244,6 +244,7 @@
* (FIFO empty).
*/
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index ce6b9938fde0..b58ec56b86f8 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -73,6 +73,7 @@
* i2400m_notif_submit
*/
#include <linux/usb.h>
+#include <linux/gfp.h>
#include "i2400m-usb.h"
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index f88d1c6e35cb..7b6a1d98bd74 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -56,6 +56,7 @@
* i2400mu_rx_kick()
*/
#include <linux/usb.h>
+#include <linux/slab.h>
#include "i2400m-usb.h"
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index ba1b02362dfc..a26483a812a5 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -83,6 +83,7 @@
* i2400mu_rx_release() called from i2400mu_bus_dev_stop()
*/
#include <linux/workqueue.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681f8a0d..d8c4d6497fdf 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -66,6 +66,7 @@
#include "i2400m-usb.h"
#include <linux/wimax/i2400m.h>
#include <linux/debugfs.h>
+#include <linux/slab.h>
#define D_SUBMODULE usb
@@ -246,7 +247,7 @@ error_kzalloc:
*
* The device will be fully reset internally, but won't be
* disconnected from the USB bus (so no reenumeration will
- * happen). Firmware upload will be neccessary.
+ * happen). Firmware upload will be necessary.
*
* The device will send a reboot barker in the notification endpoint
* that will trigger the driver to reinitialize the state
@@ -257,7 +258,7 @@ error_kzalloc:
*
* The device will be fully reset internally, disconnected from the
* USB bus an a reenumeration will happen. Firmware upload will be
- * neccessary. Thus, we don't do any locking or struct
+ * necessary. Thus, we don't do any locking or struct
* reinitialization, as we are going to be fully disconnected and
* reenumerated.
*
@@ -478,7 +479,16 @@ int i2400mu_probe(struct usb_interface *iface,
i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
i2400m->bus_bm_mac_addr_impaired = 0;
- if (id->idProduct == USB_DEVICE_ID_I6050) {
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
+ i2400mu->i6050 = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (i2400mu->i6050) {
i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
i2400mu->endpoint_cfg.bulk_out = 0;
i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +729,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
{ USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 56dd6650c97a..588943660755 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -112,6 +112,7 @@ config AIRO_CS
depends on PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
+ select WEXT_PRIV
select CRYPTO
select CRYPTO_AES
---help---
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4ff..ab61d2b558d6 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -39,7 +40,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -302,18 +303,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct adm8211_priv *priv = dev->priv;
-
- stats[0].len = priv->cur_tx - priv->dirty_tx;
- stats[0].limit = priv->tx_ring_size - 2;
- stats[0].count = priv->dirty_tx;
-
- return 0;
-}
-
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
@@ -1400,15 +1389,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
@@ -1416,8 +1405,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
ADM8211_IDLE();
- ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr));
- ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
+ ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
@@ -1427,7 +1416,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
@@ -1773,7 +1762,6 @@ static const struct ieee80211_ops adm8211_ops = {
.prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
- .get_tx_stats = adm8211_get_tx_stats,
.get_tsf = adm8211_get_tsft
};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc6..dc5018a6d9ed 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -51,13 +51,14 @@
#include <linux/freezer.h>
#include <linux/ieee80211.h>
+#include <net/iw_handler.h>
#include "airo.h"
#define DRV_NAME "airo"
#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
{ 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
@@ -2310,7 +2311,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
airo_set_promisc(ai);
}
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* Turn on multicast. (Should be already setup...) */
}
}
@@ -5254,11 +5255,8 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
WepKeyRid wkr;
int rc;
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
+ if (WARN_ON(keylen == 0))
return -1;
- }
memset(&wkr, 0, sizeof(wkr));
wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6405,11 +6403,7 @@ static int airo_set_encode(struct net_device *dev,
if (dwrq->length > MIN_KEY_SIZE)
key.len = MAX_KEY_SIZE;
else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
+ key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
@@ -6590,12 +6584,22 @@ static int airo_set_encodeext(struct net_device *dev,
default:
return -EINVAL;
}
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
+ if (key.len == 0) {
+ rc = set_wep_tx_idx(local, idx, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP transmit index to %d: %d.",
+ idx, rc);
+ return rc;
+ }
+ } else {
+ rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP key at index %d: %d.",
+ idx, rc);
+ return rc;
+ }
}
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3ebe..0fb419936dff 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
}
static int at76_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct at76_priv *priv = hw->priv;
int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mtx);
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->iw_mode = IW_MODE_INFRA;
break;
@@ -1814,7 +1814,7 @@ exit:
}
static void at76_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d82..dc662b76a1c8 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_MAX_BA_RETRY 5
#define AR9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,12 @@ struct ar9170_sta_tid {
u16 tid;
enum ar9170_tid_state state;
bool active;
- u8 retry;
+};
+
+struct ar9170_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
};
#define AR9170_QUEUE_TIMEOUT 64
@@ -154,12 +158,15 @@ struct ar9170_sta_tid {
#define AR9170_NUM_TX_STATUS 128
#define AR9170_NUM_TX_AGG_MAX 30
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
struct ath_common common;
struct mutex mutex;
enum ar9170_device_state state;
+ bool registered;
unsigned long bad_hw_nagger;
int (*open)(struct ar9170 *);
@@ -211,7 +218,7 @@ struct ar9170 {
/* qos queue settings */
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[5];
+ struct ar9170_tx_queue_stats tx_stats[5];
struct ieee80211_tx_queue_params edcf[5];
spinlock_t cmdlock;
@@ -248,13 +255,8 @@ struct ar9170_sta_info {
unsigned int ampdu_max_len;
};
-#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
-#define AR9170_TX_FLAG_NO_ACK BIT(1)
-#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
-
struct ar9170_tx_info {
unsigned long timeout;
- unsigned int flags;
};
#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d8400..0a1d4c28e68a 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
#define AR9170_TX_MAC_RATE_PROBE 0x8000
/* either-or */
+#define AR9170_TX_PHY_MOD_MASK 0x00000003
#define AR9170_TX_PHY_MOD_CCK 0x00000000
#define AR9170_TX_PHY_MOD_OFDM 0x00000001
#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79e..857e86104295 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
ar->edcf[0].txop | ar->edcf[1].txop << 16);
ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
- ar->edcf[1].txop | ar->edcf[3].txop << 16);
+ ar->edcf[2].txop | ar->edcf[3].txop << 16);
ar9170_regwrite_finish();
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013e..c53692980990 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -38,6 +38,7 @@
*/
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
@@ -194,12 +195,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
+static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
static inline u16 ar9170_get_tid(struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+ return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
}
#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +217,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
"mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
+ ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
jiffies_to_msecs(arinfo->timeout - jiffies));
}
@@ -391,7 +395,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
- for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
+ for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), i);
@@ -430,7 +434,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[queue].len--;
- if (skb_queue_empty(&ar->tx_pending[queue])) {
+ if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +444,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status[queue], skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: unsupported frame flags!\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- dev_kfree_skb_any(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ar9170_tx_ampdu_callback(ar, skb);
+ } else {
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+ skb_queue_tail(&ar->tx_status[queue], skb);
+ }
}
if (!ar->tx_stats[queue].len &&
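The ar9170 hunks above and below drop the driver-private AR9170_TX_FLAG_* bookkeeping and instead read the equivalent bits straight from the per-skb mac80211 TX info. A minimal sketch of that lookup (illustrative only, not from the patch):

#include <net/mac80211.h>

static bool example_needs_ampdu_path(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return !(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	       (info->flags & IEEE80211_TX_CTL_AMPDU);
}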
@@ -1407,17 +1406,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
(is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
-
- goto out;
- }
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
/*
* WARNING:
* Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1419,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
- } else {
- arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (unlikely(!info->control.sta))
+ goto err_out;
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ } else {
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ }
}
-out:
return 0;
err_out:
@@ -1671,8 +1664,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
* tell the FW/HW that this is the last frame,
* that way it will wait for the immediate block ack.
*/
- if (likely(skb_peek_tail(&agg)))
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
+ ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
#ifdef AR9170_TXAGG_DEBUG
printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1708,21 @@ static void ar9170_tx(struct ar9170 *ar)
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+ skb_queue_len(&ar->tx_pending[i]));
+
+ if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+ "remaining slots:%d, needed:%d\n",
+ wiphy_name(ar->hw->wiphy), i, remaining_space,
+ frames);
+#endif /* AR9170_QUEUE_DEBUG */
+ frames = remaining_space;
+ }
+
+ ar->tx_stats[i].len += frames;
+ ar->tx_stats[i].count += frames;
if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1740,8 @@ static void ar9170_tx(struct ar9170 *ar)
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_stop_queue(ar->hw, i);
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- continue;
}
- frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
- skb_queue_len(&ar->tx_pending[i]));
-
- if (remaining_space < frames) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
- "remaining slots:%d, needed:%d\n",
- wiphy_name(ar->hw->wiphy), i, remaining_space,
- frames);
-#endif /* AR9170_QUEUE_DEBUG */
- frames = remaining_space;
- }
-
- ar->tx_stats[i].len += frames;
- ar->tx_stats[i].count += frames;
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (!frames)
@@ -1773,7 +1763,7 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_inc(&ar->tx_ampdu_pending);
#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1774,7 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_pending);
frames_failed++;
@@ -1950,7 +1940,7 @@ err_free:
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1963,8 +1953,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
goto unlock;
}
- ar->vif = conf->vif;
- memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
+ ar->vif = vif;
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
ar->rx_software_decryption = true;
@@ -1984,7 +1974,7 @@ unlock:
}
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
@@ -2340,55 +2330,55 @@ out:
return err;
}
-static void ar9170_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static int ar9170_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ar9170 *ar = hw->priv;
struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
unsigned int i;
- switch (cmd) {
- case STA_NOTIFY_ADD:
- memset(sta_info, 0, sizeof(*sta_info));
+ memset(sta_info, 0, sizeof(*sta_info));
- if (!sta->ht_cap.ht_supported)
- break;
+ if (!sta->ht_cap.ht_supported)
+ return 0;
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
+ if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
+ ar->global_ampdu_density = sta->ht_cap.ampdu_density;
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
+ if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
+ ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].retry = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
+ for (i = 0; i < AR9170_NUM_TID; i++) {
+ sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
+ sta_info->agg[i].active = false;
+ sta_info->agg[i].ssn = 0;
+ sta_info->agg[i].tid = i;
+ INIT_LIST_HEAD(&sta_info->agg[i].list);
+ skb_queue_head_init(&sta_info->agg[i].queue);
+ }
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
- break;
+ sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
- case STA_NOTIFY_REMOVE:
- if (!sta->ht_cap.ht_supported)
- break;
+ return 0;
+}
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
+static int ar9170_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
+ unsigned int i;
- break;
+ if (!sta->ht_cap.ht_supported)
+ return 0;
- default:
- break;
+ for (i = 0; i < AR9170_NUM_TID; i++) {
+ sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
+ skb_queue_purge(&sta_info->agg[i].queue);
}
+
+ return 0;
}
static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -2408,18 +2398,6 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *tx_stats)
-{
- struct ar9170 *ar = hw->priv;
-
- spin_lock_bh(&ar->tx_stats_lock);
- memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
- spin_unlock_bh(&ar->tx_stats_lock);
-
- return 0;
-}
-
static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
@@ -2519,9 +2497,9 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_notify = ar9170_sta_notify,
+ .sta_add = ar9170_sta_add,
+ .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
- .get_tx_stats = ar9170_get_tx_stats,
.ampdu_action = ar9170_ampdu_action,
};
@@ -2535,7 +2513,7 @@ void *ar9170_alloc(size_t priv_size)
/*
* this buffer is used for rx stream reconstruction.
* Under heavy load this device (or the transport layer?)
- * tends to split the streams into seperate rx descriptors.
+ * tends to split the streams into separate rx descriptors.
*/
skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
@@ -2724,7 +2702,8 @@ int ar9170_register(struct ar9170 *ar, struct device *pdev)
dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
wiphy_name(ar->hw->wiphy));
- return err;
+ ar->registered = true;
+ return 0;
err_unreg:
ieee80211_unregister_hw(ar->hw);
@@ -2735,11 +2714,14 @@ err_out:
void ar9170_unregister(struct ar9170 *ar)
{
+ if (ar->registered) {
#ifdef CONFIG_AR9170_LEDS
- ar9170_unregister_leds(ar);
+ ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */
- kfree_skb(ar->rx_failover);
ieee80211_unregister_hw(ar->hw);
+ }
+
+ kfree_skb(ar->rx_failover);
mutex_destroy(&ar->mutex);
}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d924057..99a6da464bd3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -38,6 +38,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
@@ -84,6 +85,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
{ USB_DEVICE(0x0cde, 0x0026) },
+ /* Sphairon Homelink 1202 */
+ { USB_DEVICE(0x0cde, 0x0027) },
/* Arcadyan WN7512 */
{ USB_DEVICE(0x083a, 0xf522) },
/* Planex GWUS300 */
@@ -92,6 +95,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x04bb, 0x093f) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057C, 0x8401) },
+ /* NEC WL300NU-G */
+ { USB_DEVICE(0x0409, 0x0249) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
@@ -414,7 +419,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
spin_unlock_irqrestore(&aru->common.cmdlock, flags);
usb_fill_int_urb(urb, aru->udev,
- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
+ usb_sndintpipe(aru->udev, AR9170_EP_CMD),
aru->common.cmdbuf, plen + 4,
ar9170_usb_tx_urb_complete, NULL, 1);
@@ -580,43 +585,6 @@ static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
return 0;
}
-static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
-{
- int err = 0;
-
- err = request_firmware(&aru->firmware, "ar9170.fw",
- &aru->udev->dev);
- if (!err) {
- aru->init_values = NULL;
- return 0;
- }
-
- if (aru->req_one_stage_fw) {
- dev_err(&aru->udev->dev, "ar9170.fw firmware file "
- "not found and is required for this device\n");
- return -EINVAL;
- }
-
- dev_err(&aru->udev->dev, "ar9170.fw firmware file "
- "not found, trying old firmware...\n");
-
- err = request_firmware(&aru->init_values, "ar9170-1.fw",
- &aru->udev->dev);
- if (err) {
- dev_err(&aru->udev->dev, "file with init values not found.\n");
- return err;
- }
-
- err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
- if (err) {
- release_firmware(aru->init_values);
- dev_err(&aru->udev->dev, "firmware file not found.\n");
- return err;
- }
-
- return err;
-}
-
static int ar9170_usb_reset(struct ar9170_usb *aru)
{
int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);
@@ -755,6 +723,103 @@ err_out:
return err;
}
+static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
+{
+ struct device *parent = aru->udev->dev.parent;
+
+ /* unbind anything failed */
+ if (parent)
+ down(&parent->sem);
+ device_release_driver(&aru->udev->dev);
+ if (parent)
+ up(&parent->sem);
+}
+
+static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
+{
+ struct ar9170_usb *aru = context;
+ int err;
+
+ aru->firmware = fw;
+
+ if (!fw) {
+ dev_err(&aru->udev->dev, "firmware file not found.\n");
+ goto err_freefw;
+ }
+
+ err = ar9170_usb_init_device(aru);
+ if (err)
+ goto err_freefw;
+
+ err = ar9170_usb_open(&aru->common);
+ if (err)
+ goto err_unrx;
+
+ err = ar9170_register(&aru->common, &aru->udev->dev);
+
+ ar9170_usb_stop(&aru->common);
+ if (err)
+ goto err_unrx;
+
+ return;
+
+ err_unrx:
+ ar9170_usb_cancel_urbs(aru);
+
+ err_freefw:
+ ar9170_usb_firmware_failed(aru);
+}
+
+static void ar9170_usb_firmware_inits(const struct firmware *fw,
+ void *context)
+{
+ struct ar9170_usb *aru = context;
+ int err;
+
+ if (!fw) {
+ dev_err(&aru->udev->dev, "file with init values not found.\n");
+ ar9170_usb_firmware_failed(aru);
+ return;
+ }
+
+ aru->init_values = fw;
+
+ /* ok so we have the init values -- get code for two-stage */
+
+ err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-2.fw",
+ &aru->udev->dev, GFP_KERNEL, aru,
+ ar9170_usb_firmware_finish);
+ if (err)
+ ar9170_usb_firmware_failed(aru);
+}
+
+static void ar9170_usb_firmware_step2(const struct firmware *fw, void *context)
+{
+ struct ar9170_usb *aru = context;
+ int err;
+
+ if (fw) {
+ ar9170_usb_firmware_finish(fw, context);
+ return;
+ }
+
+ if (aru->req_one_stage_fw) {
+ dev_err(&aru->udev->dev, "ar9170.fw firmware file "
+ "not found and is required for this device\n");
+ ar9170_usb_firmware_failed(aru);
+ return;
+ }
+
+ dev_err(&aru->udev->dev, "ar9170.fw firmware file "
+ "not found, trying old firmware...\n");
+
+ err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-1.fw",
+ &aru->udev->dev, GFP_KERNEL, aru,
+ ar9170_usb_firmware_inits);
+ if (err)
+ ar9170_usb_firmware_failed(aru);
+}
+
static bool ar9170_requires_one_stage(const struct usb_device_id *id)
{
if (!id->driver_info)
@@ -812,33 +877,9 @@ static int ar9170_usb_probe(struct usb_interface *intf,
if (err)
goto err_freehw;
- err = ar9170_usb_request_firmware(aru);
- if (err)
- goto err_freehw;
-
- err = ar9170_usb_init_device(aru);
- if (err)
- goto err_freefw;
-
- err = ar9170_usb_open(ar);
- if (err)
- goto err_unrx;
-
- err = ar9170_register(ar, &udev->dev);
-
- ar9170_usb_stop(ar);
- if (err)
- goto err_unrx;
-
- return 0;
-
-err_unrx:
- ar9170_usb_cancel_urbs(aru);
-
-err_freefw:
- release_firmware(aru->init_values);
- release_firmware(aru->firmware);
-
+ return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
+ &aru->udev->dev, GFP_KERNEL, aru,
+ ar9170_usb_firmware_step2);
err_freehw:
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
@@ -858,12 +899,12 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
ar9170_unregister(&aru->common);
ar9170_usb_cancel_urbs(aru);
- release_firmware(aru->init_values);
- release_firmware(aru->firmware);
-
usb_put_dev(aru->udev);
usb_set_intfdata(intf, NULL);
ieee80211_free_hw(aru->common.hw);
+
+ release_firmware(aru->init_values);
+ release_firmware(aru->firmware);
}
#ifdef CONFIG_PM
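The ar9170 USB rework above replaces the synchronous request_firmware() calls in probe with request_firmware_nowait(): probe only queues the request for ar9170.fw and returns, and the completion callbacks either finish device setup or chain the next request (ar9170-1.fw, then ar9170-2.fw for two-stage devices), unbinding the driver via device_release_driver() if nothing could be loaded. A stripped-down sketch of the pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

struct example_dev;			/* hypothetical driver state */

static void example_fw_done(const struct firmware *fw, void *context)
{
	/* context is the example_dev passed to request_firmware_nowait() */
	if (!fw) {
		/* no image: fall back to another name or give up */
		return;
	}
	/* ... upload fw->data / fw->size, then register the device ... */
	release_firmware(fw);
}

static int example_request_fw(struct device *dev, struct example_dev *edev)
{
	/* returns immediately; example_fw_done() runs later */
	return request_firmware_nowait(THIS_MODULE, 1, "example.fw",
				       dev, GFP_KERNEL, edev,
				       example_fw_done);
}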
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 9e05648356fe..71fc960814f0 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -74,7 +74,6 @@ struct ath_common;
struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
- void (*cleanup)(struct ath_common *common);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a96761111..ac67f02e26d8 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -535,13 +535,12 @@ struct ath5k_txq_info {
u32 tqi_cbr_period; /* Constant bit rate period */
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Not used */
+ u32 tqi_ready_time; /* Time queue waits after an event */
};
/*
* Transmit packet types.
* used on tx control descriptor
- * TODO: Use them inside base.c corectly
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -1063,6 +1062,7 @@ struct ath5k_hw {
u32 ah_cw_min;
u32 ah_cw_max;
u32 ah_limit_tx_retries;
+ u8 ah_coverage_class;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1200,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
/* Protocol Control Unit Functions */
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
+extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1232,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1315,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
 * Functions used internally
*/
-/*
- * Translate usec to hw clock units
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
- return turbo ? (usec * 80) : (usec * 40);
-}
-
-/*
- * Translate hw clock units to usec
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
- return turbo ? (clock / 80) : (clock / 40);
-}
-
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 42284445b75e..dc0786cc2639 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -21,6 +21,7 @@
\*************************************/
#include <linux/pci.h>
+#include <linux/slab.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4c086f069b1..3abbe7513ab5 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -50,6 +50,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <net/ieee80211_radiotap.h>
@@ -83,7 +84,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
/* Known PCI ids */
-static const struct pci_device_id ath5k_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +226,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct ieee80211_hw *hw);
static void ath5k_stop(struct ieee80211_hw *hw);
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static void ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mc_list);
@@ -241,8 +242,6 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -254,6 +253,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class);
static const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -267,13 +268,13 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
.conf_tx = NULL,
- .get_tx_stats = ath5k_get_tx_stats,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
.bss_info_changed = ath5k_bss_info_changed,
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
+ .set_coverage_class = ath5k_set_coverage_class,
};
/*
@@ -1246,6 +1247,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
}
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
struct ath5k_txq *txq)
@@ -1300,7 +1324,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
+ ieee80211_get_hdrlen_from_skb(skb),
+ get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1329,7 +1354,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
- sc->tx_stats[txq->qnum].len++;
if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
else /* no, so only link it */
@@ -1513,7 +1537,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
if (ret)
- return ret;
+ goto err;
+
if (sc->opmode == NL80211_IFTYPE_AP ||
sc->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
@@ -1540,10 +1565,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
if (ret) {
ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
- return ret;
+ goto err;
}
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
+
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
}
static void
@@ -1562,7 +1602,6 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
ath5k_txbuf_free(sc, bf);
spin_lock_bh(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock_bh(&sc->txbuflock);
@@ -1903,17 +1942,6 @@ accept:
rxs->noise = sc->ah->ah_noise_floor;
rxs->signal = rxs->noise + rs.rs_rssi;
- /* An rssi of 35 indicates you should be able use
- * 54 Mbps reliably. A more elaborate scheme can be used
- * here but it requires a map of SNR/throughput for each
- * possible mode used */
- rxs->qual = rs.rs_rssi * 100 / 35;
-
- /* rssi can be more than 35 though, anything above that
- * should be considered at 100% */
- if (rxs->qual > 100)
- rxs->qual = 100;
-
rxs->antenna = rs.rs_antenna;
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2003,10 +2031,8 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
}
ieee80211_tx_status(sc->hw, skb);
- sc->tx_stats[txq->qnum].count++;
spin_lock(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock(&sc->txbuflock);
@@ -2381,6 +2407,9 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
+ /* Set PHY calibration interval */
+ ah->ah_cal_intval = ath5k_calinterval;
+
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2408,10 +2437,6 @@ ath5k_init(struct ath5k_softc *sc)
/* Set ack to be sent at low bit-rates */
ath5k_hw_set_ack_bitrate_high(ah, false);
-
- /* Set PHY calibration inteval */
- ah->ah_cal_intval = ath5k_calinterval;
-
ret = 0;
done:
mmiowb();
@@ -2785,7 +2810,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
}
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
int ret;
@@ -2796,22 +2821,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- sc->vif = conf->vif;
+ sc->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
- sc->opmode = conf->type;
+ sc->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
- ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
+ ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
ret = 0;
@@ -2822,13 +2847,13 @@ end:
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
u8 mac[ETH_ALEN] = {};
mutex_lock(&sc->lock);
- if (sc->vif != conf->vif)
+ if (sc->vif != vif)
goto end;
ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3109,17 +3134,6 @@ ath5k_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int
-ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct ath5k_softc *sc = hw->priv;
-
- memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
-
- return 0;
-}
-
static u64
ath5k_get_tsf(struct ieee80211_hw *hw)
{
@@ -3274,3 +3288,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
+ * coverage class. The values are persistent, they are restored after device
+ * reset.
+ */
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 952b3a21bbc3..7e1a88a5abdb 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -117,7 +117,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 5d1c8677f180..67665cdc7afe 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -21,6 +21,8 @@
* EEPROM access functions and helpers *
\*************************************/
+#include <linux/slab.h>
+
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
@@ -97,7 +99,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
- u32 cksum, offset;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
@@ -116,12 +118,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* Validate the checksum of the EEPROM date. There are some
* devices with invalid EEPROMs.
*/
- for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
return -EIO;
}
@@ -403,8 +431,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
AR5K_EEPROM_READ(o++, val);
- ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
- ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
+ ee->ee_i_cal[mode] = (val >> 5) & 0x3f;
+ ee->ee_q_cal[mode] = val & 0x1f;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
AR5K_EEPROM_READ(o++, val);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 0123f3521a0b..473a483bb9c3 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -37,6 +37,14 @@
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 60f547503d75..67aa52e9bf94 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,6 +77,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ /* LiteOn AR5BXB63 (magooz@salug.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 (shahar@shahar-or.co.il) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d9..aefe84f9c04b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
}
/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
}
/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
+ * ath5k_hw_htoclock - Translate usec to hw clock units
+ *
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ return usec * ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ return clock / ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_get_clockrate - Get the clock rate for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ int clock;
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Clock rate in turbo modes is twice the normal rate */
+ if (channel->hw_value & CHANNEL_TURBO)
+ clock *= 2;
+
+ return clock;
+}
+
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 6; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_CCK)
+ return 20; /* 802.11b */
+
+ return 9; /* 802.11 a/g */
+}
+
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 8; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ return 16; /* 802.11a */
+
+ return 10; /* 802.11 b/g */
+}
+
+/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
return 0;
}
+/**
+ * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets slot time, ACK timeout and CTS timeout for given coverage class.
+ */
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
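A quick worked example of the arithmetic above, using the per-mode defaults added earlier in this file (my own numbers, not from the patch): on a 5 GHz (802.11a) channel with coverage class 2, slot time becomes 9 + 3 * 2 = 15 usec, and both ACK and CTS timeout become SIFS + slot = 16 + 15 = 31 usec; ath5k_hw_set_ack_timeout() then converts that to 31 * 40 = 1240 hardware clock units via ath5k_hw_htoclock(), since ath5k_hw_get_clockrate() reports 40 for 802.11a.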
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 72474c0ccaff..68e2bccd90d3 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -23,6 +23,7 @@
#define _ATH5K_PHY
#include <linux/delay.h>
+#include <linux/slab.h>
#include "ath5k.h"
#include "reg.h"
@@ -1386,38 +1387,39 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
goto done;
/* Calibration has finished, get the results and re-run */
+
+ /* work around empty results which can apparently happen on 5212 */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ "iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
+ if (i_pwr && q_pwr)
+ break;
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
q_coffd = q_pwr >> 7;
- /* No correction */
- if (i_coffd == 0 || q_coffd == 0)
+ /* protect against divide by 0 and loss of sign bits */
+ if (i_coffd == 0 || q_coffd < 2)
goto done;
- i_coff = ((-iq_corr) / i_coffd);
-
- /* Boundary check */
- if (i_coff > 31)
- i_coff = 31;
- if (i_coff < -32)
- i_coff = -32;
+ i_coff = (-iq_corr) / i_coffd;
+ i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
- q_coff = (((s32)i_pwr / q_coffd) - 128);
+ q_coff = (i_pwr / q_coffd) - 128;
+ q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
- /* Boundary check */
- if (q_coff > 15)
- q_coff = 15;
- if (q_coff < -16)
- q_coff = -16;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ "new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
+ i_coff, q_coff, i_coffd, q_coffd);
- /* Commit new I/Q value */
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
- ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
+ /* Commit new I/Q values (set enable bit last to match HAL sources) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF, i_coff);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF, q_coff);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
/* Re-enable calibration -if we don't we'll commit
* the same values again and again */
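A minimal standalone sketch (not driver code) of the saturation applied above: i_coff must fit the signed 6-bit I-coefficient field and q_coff the signed 5-bit Q-coefficient field of AR5K_PHY_IQ, so out-of-range calibration results are clamped rather than allowed to wrap.

    #include <stdio.h>

    /* Hypothetical helper mirroring the kernel's clamp() used above. */
    #define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
            int i_coff = -57;                  /* example raw result */
            int q_coff = 23;

            i_coff = CLAMP(i_coff, -32, 31);   /* signed 6-bit field */
            q_coff = CLAMP(q_coff, -16, 15);   /* signed 5-bit field */
            printf("i_coff=%d q_coff=%d\n", i_coff, q_coff);  /* -32 15 */
            return 0;
    }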
@@ -1873,7 +1875,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
break;
case AR5K_ANTMODE_FIXED_A:
def_ant = 1;
- tx_ant = 0;
+ tx_ant = 1;
use_def_for_tx = true;
update_def_on_tx = false;
use_def_for_rts = true;
@@ -1882,7 +1884,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
break;
case AR5K_ANTMODE_FIXED_B:
def_ant = 2;
- tx_ant = 0;
+ tx_ant = 2;
use_def_for_tx = true;
update_def_on_tx = false;
use_def_for_rts = true;
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef206..9122a8556f45 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
break;
case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
@@ -520,12 +521,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
*/
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
+ unsigned int slot_time_clock;
+
ATH5K_TRACE(ah->ah_sc);
+
if (ah->ah_version == AR5K_AR5210)
- return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
- AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
else
- return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+
+ return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
}
/*
@@ -533,15 +538,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+
ATH5K_TRACE(ah->ah_sc);
- if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
- ah->ah_turbo), AR5K_SLOT_TIME);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
else
- ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
return 0;
}
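Worked example for the conversion above: a 9 us slot time is written as 9 * 40 = 360 clock units on an 802.11a channel and 9 * 44 = 396 on 802.11g; the new lower bound of 6 us matches the turbo-mode value returned by ath5k_hw_get_default_slottime().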
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 4cb9c5df9f46..1464f89b249c 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -2187,6 +2187,7 @@
*/
#define AR5K_PHY_IQ 0x9920 /* Register Address */
#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
+#define AR5K_PHY_IQ_CORR_Q_Q_COFF_S 0
#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
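For reference, the AR5K_PHY_IQ layout implied by these masks: bits 0-4 hold the signed 5-bit Q coefficient, bits 5-10 the signed 6-bit I coefficient, and bit 11 is the correction-enable flag, which the calibration changes above now set only after both coefficient fields have been written.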
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc77869..cbf28e379843 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
!(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
- * ALGO: coef = (5 * clock * carrier_freq) / 2)
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
/* TODO: Half/quarter rate */
- clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
-
+ clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
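To make the fixed-point scaling in the hunk above concrete: with a 40 MHz clock and a 5180 MHz channel, coef_scaled = (5 * (40 << 24) / 2) / 5180 = 323884, i.e. the real-valued coefficient (5 * 40 / 5180) / 2, about 0.0193, carried with 24 fractional bits.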
@@ -852,12 +851,15 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
AR5K_INIT_CYCRSSI_THR1);
- /* I/Q correction
- * TODO: Per channel i/q infos ? */
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
- AR5K_PHY_IQ_CORR_ENABLE |
- (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
- ee->ee_q_cal[ee_mode]);
+ /* I/Q correction (set enable bit last to match HAL sources) */
+ /* TODO: Per channel i/q infos ? */
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF,
+ ee->ee_i_cal[ee_mode]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF,
+ ee->ee_q_cal[ee_mode]);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
+ }
/* Heavy clipping -disable for now */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
@@ -1317,6 +1319,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
/*
* Configure QCUs/DCUs
*/
@@ -1371,15 +1377,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists */
- if (ah->ah_version == AR5K_AR5212)
- ath5k_hw_set_sleep_clock(ah, true);
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
/*
- * Disable beacons and reset the register
+ * Disable beacons and reset the TSF
*/
- AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
- AR5K_BEACON_RESET_TSF);
-
+ AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
+ ath5k_hw_reset_tsf(ah);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 03a1106ad725..5774cea23a3b 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -25,7 +25,7 @@ config ATH9K
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a9..6b50d5eb9ec3 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
main.o \
recv.o \
xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137ab..ca4994f13151 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,12 +27,6 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-static void ath_ahb_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *)common->priv;
- iounmap(sc->mem);
-}
-
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_softc *sc = (struct ath_softc *)common->priv;
@@ -54,8 +48,6 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
static struct ath_bus_ops ath_ahb_bus_ops = {
.read_cachesize = ath_ahb_read_cachesize,
- .cleanup = ath_ahb_cleanup,
-
.eeprom_read = ath_ahb_eeprom_read,
};
@@ -121,16 +113,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
+
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
- ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_detach;
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
}
ah = sc->sc_ah;
@@ -143,8 +138,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
return 0;
- err_detach:
- ath_detach(sc);
+ err_irq:
+ free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
@@ -161,8 +156,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e2cef2ff5d8f..83c7ea4c007f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -33,11 +33,11 @@ struct ath_node;
/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 8) ? \
+#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 16) ? \
+ (sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 32) ? \
+ ((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
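The ito64() fix above works because sizeof() reports object sizes in bytes, not bits; with the old 8/16/32 comparisons no branch ever matched and every scalar fell through to the plain cast. A standalone sketch (not driver code) of the corrected selection:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t x = 0x1234;

            /* sizeof() counts bytes, so a u16 takes the == 2 branch. */
            assert(sizeof(x) == 2);

            unsigned long long v = (sizeof(x) == 1) ? (x & 0xff) :
                                   (sizeof(x) == 2) ? (x & 0xffff) :
                                   (sizeof(x) == 4) ? (x & 0xffffffffULL) :
                                   (unsigned long long)x;
            printf("0x%llx\n", v);  /* 0x1234, masked to 16 bits */
            return 0;
    }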
@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath9k_enable_ps(struct ath_softc *sc);
/********/
/* VIFs */
@@ -341,6 +342,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_ani_calibrate(unsigned long data);
+
+/**********/
+/* BTCOEX */
+/**********/
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
@@ -358,9 +365,14 @@ struct ath_btcoex {
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
+ u32 btscan_no_stomp; /* in usec */
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
};
+int ath_init_btcoex_timer(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+
/********************/
/* LED Control */
/********************/
@@ -385,6 +397,9 @@ struct ath_led {
bool registered;
};
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -403,26 +418,29 @@ struct ath_led {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_FULL_RESET BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_WAIT_FOR_BEACON BIT(12)
-#define SC_OP_LED_ON BIT(13)
-#define SC_OP_SCANNING BIT(14)
-#define SC_OP_TSF_RESET BIT(15)
-#define SC_OP_WAIT_FOR_CAB BIT(16)
-#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
-#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
-#define SC_OP_BEACON_SYNC BIT(19)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
-#define SC_OP_PS_ENABLED BIT(23)
+#define SC_OP_INVALID BIT(0)
+#define SC_OP_BEACONS BIT(1)
+#define SC_OP_RXAGGR BIT(2)
+#define SC_OP_TXAGGR BIT(3)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_LED_ON BIT(9)
+#define SC_OP_SCANNING BIT(10)
+#define SC_OP_TSF_RESET BIT(11)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
+#define SC_OP_BT_SCAN BIT(13)
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_NULLFUNC_COMPLETED BIT(5)
+#define PS_ENABLED BIT(6)
struct ath_wiphy;
struct ath_rate_table;
@@ -453,16 +471,17 @@ struct ath_softc {
int irq;
spinlock_t sc_resetlock;
spinlock_t sc_serial_rw;
- spinlock_t ani_lock;
spinlock_t sc_pm_lock;
struct mutex mutex;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
+ u16 ps_flags; /* PS_* */
u16 curtxpow;
u8 nbcnvifs;
u16 nvifs;
bool ps_enabled;
+ bool ps_idle;
unsigned long ps_usecount;
enum ath9k_int imask;
@@ -509,6 +528,7 @@ struct ath_wiphy {
int chan_is_ht;
};
+void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -519,21 +539,16 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-static inline void ath_bus_cleanup(struct ath_common *common)
-{
- common->bus_ops->cleanup(common);
-}
-
extern struct ieee80211_ops ath9k_ops;
+extern int modparam_nohwcrypt;
irqreturn_t ath_isr(int irq, void *dev);
-void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
-void ath_detach(struct ath_softc *sc);
+void ath9k_deinit_device(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +557,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -583,4 +599,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+
+void ath_start_rfkill_poll(struct ath_softc *sc);
+extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf5..b4a31a43a62c 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -62,7 +62,7 @@ int ath_beaconq_config(struct ath_softc *sc)
* Beacons are always sent out at the lowest rate, and are not retried.
*/
static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
- struct ath_buf *bf)
+ struct ath_buf *bf, int rateidx)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ath_hw *ah = sc->sc_ah;
@@ -96,9 +96,9 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
ds->ds_data = bf->bf_buf_addr;
sband = &sc->sbands[common->hw->conf.channel->band];
- rate = sband->bitrates[0].hw_value;
+ rate = sband->bitrates[rateidx].hw_value;
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
- rate |= sband->bitrates[0].hw_value_short;
+ rate |= sband->bitrates[rateidx].hw_value_short;
ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN,
ATH9K_PKT_TYPE_BEACON,
@@ -206,7 +206,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
}
}
- ath_beacon_setup(sc, avp, bf);
+ ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);
while (skb) {
ath_tx_cabq(hw, skb);
@@ -237,7 +237,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
bf = avp->av_bcbuf;
skb = bf->bf_mpdu;
- ath_beacon_setup(sc, avp, bf);
+ ath_beacon_setup(sc, avp, bf, 0);
/* NB: caller is known to have already stopped tx dma */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = COMMIT; /* commit next beacon */
sc->beacon.slotupdate = slot;
} else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
- ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
+ ah->slottime = sc->beacon.slottime;
+ ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
@@ -525,16 +526,13 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
{
u32 nexttbtt, intval;
- /* Configure the timers only when the TSF has to be reset */
-
- if (!(sc->sc_flags & SC_OP_TSF_RESET))
- return;
-
/* NB: the beacon interval is kept internally in TU's */
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
intval /= ATH_BCBUF; /* for staggered beacons */
nexttbtt = intval;
- intval |= ATH9K_BEACON_RESET_TSF;
+
+ if (sc->sc_flags & SC_OP_TSF_RESET)
+ intval |= ATH9K_BEACON_RESET_TSF;
/*
* In AP mode we enable the beacon timers and SWBA interrupts to
@@ -576,6 +574,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
u64 tsf;
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ /* No need to configure beacon if we are not associated */
+ if (!common->curaid) {
+ ath_print(common, ATH_DBG_BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return;
+ }
+
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
@@ -738,7 +743,6 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
enum nl80211_iftype iftype;
/* Setup the beacon configuration parameters */
-
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ba31a73317c..1ee5a15ccbb1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -25,10 +25,12 @@
#define ATH_BTCOEX_DEF_BT_PERIOD 45
#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
+#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
#define ATH_BTCOEX_BMISS_THRESH 50
#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
#define ATH_BT_CNT_THRESHOLD 3
+#define ATH_BT_CNT_SCAN_THRESHOLD 15
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b9..081e0085ed4c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -75,17 +76,24 @@ static const struct file_operations fops_debug = {
#endif
+#define DMA_BUF_LEN 1024
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
- char buf[1024];
+ char *buf;
+ int retval;
unsigned int len = 0;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
u32 *qcuBase = &val[0], *dcuBase = &val[4];
+ buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
ath9k_ps_wakeup(sc);
REG_WRITE_D(ah, AR_MACMISC,
@@ -93,20 +101,20 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
@@ -120,7 +128,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
@@ -128,35 +136,37 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return retval;
}
static const struct file_operations fops_dma = {
@@ -289,23 +299,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (sc->cur_rate_table == NULL)
return 0;
- max = 80 + sc->cur_rate_table->rate_cnt * 64;
+ max = 80 + sc->cur_rate_table->rate_cnt * 1024;
buf = kmalloc(max + 1, GFP_KERNEL);
if (buf == NULL)
return 0;
buf[max] = 0;
- len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
- "Retries", "XRetries", "PER");
+ len += sprintf(buf, "%6s %6s %6s "
+ "%10s %10s %10s %10s\n",
+ "HT", "MCS", "Rate",
+ "Success", "Retries", "XRetries", "PER");
for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
+ char mcs[5];
+ char htmode[5];
+ int used_mcs = 0, used_htmode = 0;
+
+ if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
+ used_mcs = snprintf(mcs, 5, "%d",
+ sc->cur_rate_table->info[i].ratecode);
+
+ if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT40");
+ else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT20");
+ else
+ used_htmode = snprintf(htmode, 5, "????");
+ }
+
+ mcs[used_mcs] = '\0';
+ htmode[used_htmode] = '\0';
len += snprintf(buf + len, max - len,
- "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
- (ratekbps % 1000) / 100, stats->success,
- stats->retries, stats->xretries,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
stats->per);
}
@@ -554,6 +590,116 @@ static const struct file_operations fops_xmit = {
.owner = THIS_MODULE
};
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
+
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1152;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "CRC ERR",
+ sc->debug.stats.rxstats.crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT CRC ERR",
+ sc->debug.stats.rxstats.decrypt_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PHY ERR",
+ sc->debug.stats.rxstats.phy_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "MIC ERR",
+ sc->debug.stats.rxstats.mic_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ sc->debug.stats.rxstats.pre_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "POST-DELIM CRC ERR",
+ sc->debug.stats.rxstats.post_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT BUSY ERR",
+ sc->debug.stats.rxstats.decrypt_busy_err);
+
+ PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
+#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+
+ struct ath_desc *ds = bf->bf_desc;
+ u32 phyerr;
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ RX_STAT_INC(crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ RX_STAT_INC(decrypt_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ RX_STAT_INC(mic_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_STAT_INC(pre_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_STAT_INC(post_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_STAT_INC(decrypt_busy_err);
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ RX_STAT_INC(phy_err);
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ RX_PHY_ERR_INC(phyerr);
+ }
+
+#undef RX_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +752,13 @@ int ath9k_init_debug(struct ath_hw *ah)
if (!sc->debug.debugfs_xmit)
goto err;
+ sc->debug.debugfs_recv = debugfs_create_file("recv",
+ S_IRUSR,
+ sc->debug.debugfs_phy,
+ sc, &fops_recv);
+ if (!sc->debug.debugfs_recv)
+ goto err;
+
return 0;
err:
ath9k_exit_debug(ah);
@@ -617,6 +770,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
+ debugfs_remove(sc->debug.debugfs_recv);
debugfs_remove(sc->debug.debugfs_xmit);
debugfs_remove(sc->debug.debugfs_wiphy);
debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee11..86780e68b31e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
u32 delim_underrun;
};
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ *    the decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ *    encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ */
+struct ath_rx_stats {
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
};
struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
struct dentry *debugfs_rcstat;
struct dentry *debugfs_wiphy;
struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
struct ath_stats stats;
};
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
{
}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+}
+
static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000000..deab8beb0680
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+static void ath_led_blink_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ ath_led_blink_work.work);
+
+ if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
+ return;
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work,
+ (sc->sc_flags & SC_OP_LED_ON) ?
+ msecs_to_jiffies(sc->led_off_duration) :
+ msecs_to_jiffies(sc->led_on_duration));
+
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ sc->led_on_cnt = sc->led_off_cnt = 0;
+ if (sc->sc_flags & SC_OP_LED_ON)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ else
+ sc->sc_flags |= SC_OP_LED_ON;
+}
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath_softc *sc = led->sc;
+
+ switch (brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ } else {
+ sc->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ sc->sc_flags |= SC_OP_LED_ON;
+ } else {
+ sc->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->sc = sc;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+ return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ ath_unregister_led(&sc->assoc_led);
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ ath_unregister_led(&sc->tx_led);
+ ath_unregister_led(&sc->rx_led);
+ ath_unregister_led(&sc->radio_led);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else
+ sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(sc->hw);
+ snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->radio_led, trigger);
+ sc->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(sc->hw);
+ snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->assoc_led, trigger);
+ sc->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(sc->hw);
+ snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->tx_led, trigger);
+ sc->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(sc->hw);
+ snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->rx_led, trigger);
+ sc->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ ath_deinit_leds(sc);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT scan detected");
+ sc->sc_flags |= (SC_OP_BT_SCAN |
+ SC_OP_BT_PRIORITY_DETECTED);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected");
+ sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT);
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
+ break;
+ }
+
+ ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+ if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ ath9k_hw_gen_timer_stop(ah, timer);
+
+ /* if no timer is enabled, turn off interrupt mask */
+ if (timer_table->timer_mask.val == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+/*
+ * This is the master bt coex timer which runs every 45 ms;
+ * BT traffic is given priority during 55% of this period,
+ * while WLAN gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 timer_period;
+ bool is_btscan;
+
+ ath_detect_bt_priority(sc);
+
+ is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
+ btcoex->bt_stomp_type);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ timer_period = is_btscan ? btcoex->btscan_no_stomp :
+ btcoex->btcoex_no_stomp;
+ ath9k_gen_timer_start(ah,
+ btcoex->no_stomp_timer,
+ (ath9k_hw_gettsf32(ah) +
+ timer_period), timer_period * 10);
+ btcoex->hw_timer_enabled = true;
+ }
+
+ mod_timer(&btcoex->period_timer, jiffies +
+ msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "no stomp timer running \n");
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+
+ btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+ ath_btcoex_no_stomp_timer,
+ ath_btcoex_no_stomp_timer,
+ (void *) sc, AR_FIRST_NDP_TIMER);
+
+ if (!btcoex->no_stomp_timer)
+ return -ENOMEM;
+
+ return 0;
+}
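With the defaults above this gives btcoex_period = 45 * 1000 = 45000 us, btcoex_no_stomp = (100 - 55)% of that = 20250 us, and btscan_no_stomp = (100 - 90)% = 4500 us; in other words, while a BT scan is detected WLAN is left un-stomped for only 10% of each 45 ms period instead of the usual 45%.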
+
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Starting btcoex timers");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ del_timer_sync(&btcoex->period_timer);
+
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ btcoex->hw_timer_enabled = false;
+}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ec61f08cfdb..78b571129c92 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -15,6 +15,7 @@
*/
#include <linux/io.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -52,28 +53,6 @@ module_exit(ath9k_exit);
/* Helper Functions */
/********************/
-static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (!ah->curchan) /* should really check for CCK instead */
- return clks / ATH9K_CLOCK_RATE_CCK;
- if (conf->channel->band == IEEE80211_BAND_2GHZ)
- return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
-
- return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
-}
-
-static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (conf_is_ht40(conf))
- return ath9k_hw_mac_usec(ah, clks) / 2;
- else
- return ath9k_hw_mac_usec(ah, clks);
-}
-
static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +322,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
return true;
}
-static const char *ath9k_hw_devname(u16 devid)
-{
- switch (devid) {
- case AR5416_DEVID_PCI:
- return "Atheros 5416";
- case AR5416_DEVID_PCIE:
- return "Atheros 5418";
- case AR9160_DEVID_PCI:
- return "Atheros 9160";
- case AR5416_AR9100_DEVID:
- return "Atheros 9100";
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- return "Atheros 9280";
- case AR9285_DEVID_PCIE:
- return "Atheros 9285";
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- return "Atheros 9287";
- }
-
- return NULL;
-}
-
static void ath9k_hw_init_config(struct ath_hw *ah)
{
int i;
@@ -380,7 +335,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.ht_enable = 1;
ah->config.ofdm_trig_low = 200;
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
@@ -392,7 +346,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- ah->config.intr_mitigation = true;
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ ah->config.ht_enable = 1;
+ else
+ ah->config.ht_enable = 0;
+
+ ah->config.rx_intr_mitigation = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +396,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
- ah->acktimeout = (u32) -1;
- ah->ctstimeout = (u32) -1;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -590,6 +547,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
case AR9271_USB:
+ case AR2427_DEVID_PCIE:
return true;
default:
break;
@@ -855,12 +813,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
}
}
-static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
+static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
{
u32 i, j;
- if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
- test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
+ if (ah->hw_version.devid == AR9280_DEVID_PCI) {
/* EEPROM Fixup */
for (i = 0; i < ah->iniModes.ia_rows; i++) {
@@ -980,7 +937,7 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_11a_eeprom_fix(ah);
+ ath9k_hw_init_eeprom_fix(ah);
r = ath9k_hw_init_macaddr(ah);
if (r) {
@@ -1184,7 +1141,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
@@ -1204,34 +1161,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
}
-static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad ack timeout %u\n", us);
- ah->acktimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
- ah->acktimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}
-static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad cts timeout %u\n", us);
- ah->ctstimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
- ah->ctstimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
+}
+
+static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1248,31 +1196,48 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
}
}
-static void ath9k_hw_init_user_settings(struct ath_hw *ah)
+void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ int acktimeout;
+ int slottime;
+ int sifstime;
+
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (ah->misc_mode != 0)
REG_WRITE(ah, AR_PCU_MISC,
REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
- if (ah->slottime != (u32) -1)
- ath9k_hw_setslottime(ah, ah->slottime);
- if (ah->acktimeout != (u32) -1)
- ath9k_hw_set_ack_timeout(ah, ah->acktimeout);
- if (ah->ctstimeout != (u32) -1)
- ath9k_hw_set_cts_timeout(ah, ah->ctstimeout);
+
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
+ sifstime = 16;
+ else
+ sifstime = 10;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime = ah->slottime + 3 * ah->coverage_class;
+ acktimeout = slottime + sifstime;
+
+ /*
+ * Workaround for early ACK timeouts, add an offset to match the
+ * initval's 64us ack timeout value.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ acktimeout += 64 - sifstime - ah->slottime;
+
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}
+EXPORT_SYMBOL(ath9k_hw_init_global_settings);
-const char *ath9k_hw_probe(u16 vendorid, u16 devid)
-{
- return vendorid == ATHEROS_VENDOR_ID ?
- ath9k_hw_devname(devid) : NULL;
-}
-
-void ath9k_hw_detach(struct ath_hw *ah)
+void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
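A worked example for the ath9k_hw_init_global_settings() change above, on a 2.4 GHz channel with coverage class 0 and the default 9 us slot time: acktimeout = 9 + 10 (SIFS) = 19 us, and the 2 GHz workaround then adds 64 - 10 - 9 = 45 us, giving exactly the 64 us value used by the initvals; on 5 GHz no offset is added and the result is 9 + 16 = 25 us.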
@@ -1290,7 +1255,7 @@ free_hw:
kfree(ah);
ah = NULL;
}
-EXPORT_SYMBOL(ath9k_hw_detach);
+EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
@@ -1346,6 +1311,16 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
* Necessary to avoid issues on AR5416 2.0
*/
REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
}
static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
@@ -2091,7 +2066,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_enable_rfkill(ah);
- ath9k_hw_init_user_settings(ah);
+ ath9k_hw_init_global_settings(ah);
if (AR_SREV_9287_12_OR_LATER(ah)) {
REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2121,7 +2096,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
@@ -2781,7 +2756,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
*masked |= ATH9K_INT_RX;
}
@@ -2914,7 +2889,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
}
if (ints & ATH9K_INT_RX) {
mask |= AR_IMR_RXERR;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
else
mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3688,21 +3663,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
}
EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
-{
- if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad slot time %u\n", us);
- ah->slottime = (u32) -1;
- return false;
- } else {
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
- ah->slottime = us;
- return true;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_setslottime);
-
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616f..dbbf7ca5f97d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -40,6 +40,7 @@
#define AR9280_DEVID_PCI 0x0029
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
#define AR5416_AR9100_DEVID 0x000b
@@ -212,7 +213,7 @@ struct ath9k_ops_config {
u32 cck_trig_low;
u32 enable_ani;
int serialize_regmode;
- bool intr_mitigation;
+ bool rx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +552,9 @@ struct ath_hw {
u32 *bank6Temp;
int16_t txpower_indexoffset;
+ int coverage_class;
u32 beacon_interval;
u32 slottime;
- u32 acktimeout;
- u32 ctstimeout;
u32 globaltxtimeout;
/* ANI */
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
-void ath9k_hw_detach(struct ath_hw *ah);
+void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
@@ -668,7 +668,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
+void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000000..3d4d897add6d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,865 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "ath9k.h"
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+/* We use the hw_value as an index into our private channel structure */
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
+ * in 5 MHz steps; we support only the channels for which we
+ * know we have calibration data on all cards, which lets
+ * this table stay static */
+static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; we support only the channels for which we
+ * know we have calibration data on all cards, which lets
+ * this table stay static */
+static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
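
As the comment above notes, hw_value is used purely as an index into the driver's private channel structures. Below is a minimal lookup sketch; the struct and array names (example_priv_channel, example_priv_channels) are placeholders chosen for illustration and are not the driver's actual fields.

struct example_priv_channel {
	u16 channel;		/* center frequency, MHz */
	u32 channel_flags;
};

/* One private entry per line of the 2 GHz + 5 GHz tables above (14 + 24). */
static struct example_priv_channel example_priv_channels[38];

static struct example_priv_channel *
example_lookup_channel(struct ieee80211_channel *chan)
{
	/* hw_value was assigned by CHAN2G()/CHAN5G() above */
	return &example_priv_channels[chan->hw_value];
}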
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
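
SHPCHECK() above derives the short-preamble hardware code by setting bit 2 (0x04) of the long-preamble code, and only for entries flagged IEEE80211_RATE_SHORT_PREAMBLE. A worked example, illustrative only and not part of the patch:

static inline u8 example_short_preamble_code(void)
{
	/*
	 * CCK 2 Mb/s: RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE)
	 * yields hw_value_short = 0x1a | 0x04 = 0x1e, while OFDM
	 * entries with flags == 0 (e.g. RATE(60, 0x0b, 0)) get
	 * hw_value_short = 0, as they have no short-preamble variant.
	 */
	return SHPCHECK(0x1a, IEEE80211_RATE_SHORT_PREAMBLE);	/* 0x1e */
}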
+
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+/*
+ * Read and write: both share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can sanely accept only two requests.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_ioread32,
+ .write = ath9k_iowrite32,
+};
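
These two accessors are handed to the shared ath layer through struct ath_ops, and register-access macros elsewhere in the driver dispatch through common->ops. The macro below is only a sketch of that dispatch path, not the driver's own definition:

/* Illustrative dispatch only; the real macro lives in the ath headers. */
#define EXAMPLE_REG_WRITE(_ah, _reg, _val) \
	(ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)))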
+
+/**************************/
+/* Initialization */
+/**************************/
+
+static void setup_ht_cap(struct ath_softc *sc,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 tx_streams, rx_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+ 1 : 2;
+ rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+ 1 : 2;
+
+ if (tx_streams != rx_streams) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ ht_info->mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ ht_info->mcs.rx_mask[1] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
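
The tx_streams/rx_streams computation above relies on the power-of-two test !(x & (x - 1)): a chainmask with exactly one bit set means a single chain, anything else is treated as two streams, the most this hardware generation advertises. A standalone sketch of the same test:

/* Illustrative only: the chainmask-to-stream-count test from setup_ht_cap(). */
static inline u8 example_streams_from_chainmask(u8 chainmask)
{
	/* 0x1, 0x2, 0x4 -> one stream; 0x3, 0x5, 0x7 -> two streams */
	return !(chainmask & (chainmask - 1)) ? 1 : 2;
}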
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
+/*
+ * This function allocates both the DMA descriptor structure and the
+ * buffers it contains. These hold the hardware descriptors used by
+ * the driver.
+ */
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "ath_desc not DWORD aligned\n");
+ BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+		}
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kzalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ dd->dd_bufptr = bf;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+fail:
+ memset(dd, 0, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
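
ATH_DESC_4KB_BOUND_CHECK() above flags descriptors placed in the last 128 bytes of a 4 KB page (offset > 0xF7F), where a 32-dword descriptor fetch could cross the page boundary on parts without ATH9K_HW_CAP_4KB_SPLITTRANS. A standalone restatement of that check, for illustration only:

/* Illustrative only: the same test as ATH_DESC_4KB_BOUND_CHECK() above. */
static inline bool example_desc_near_4k_boundary(dma_addr_t daddr)
{
	/* true for page offsets 0xF80..0xFFF, the last 128 bytes */
	return (daddr & 0xFFF) > 0xF7F;
}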
+
+static void ath9k_init_crypto(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = sc->sc_ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(sc->sc_ah, (u16) i);
+
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+	/*
+	 * Check whether separate key cache entries are required to
+	 * handle both tx+rx MIC keys. With split MIC keys the number
+	 * of stations is limited to 27; otherwise it is 59.
+	 */
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+
+}
+
+static int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ int r, qnum;
+
+ switch (sc->sc_ah->btcoex_hw.scheme) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+ sc->tx.hwq_map[i] = -1;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ if (sc->beacon.beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup a beacon xmit queue\n");
+ goto err;
+ }
+
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->beacon.cabq == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ return -EIO;
+}
+
+static void ath9k_init_channels_rates(struct ath_softc *sc)
+{
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+}
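
The 5 GHz band reuses the same rate table at an offset of four, dropping the CCK entries (1, 2, 5.5 and 11 Mb/s) that exist only on 2 GHz and leaving the eight OFDM rates. A small sketch of that pointer arithmetic, illustrative only:

/* Illustrative only: how the 5 GHz view of the legacy rate table is formed. */
static inline struct ieee80211_rate *example_5ghz_rates(int *n_rates)
{
	*n_rates = ARRAY_SIZE(ath9k_legacy_rates) - 4;	/* 8 OFDM entries */
	return ath9k_legacy_rates + 4;			/* skip the CCK rates */
}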
+
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ sc->config.txpowlimit = ATH_TXPOWER_MAX;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_flags |= SC_OP_TXAGGR;
+ sc->sc_flags |= SC_OP_RXAGGR;
+ }
+
+ common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
+ common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ sc->beacon.bslot[i] = NULL;
+ sc->beacon.bslot_aphy[i] = NULL;
+ }
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = subsysid;
+ sc->sc_ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&sc->wiphy_lock);
+ spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_init_crypto(sc);
+ ath9k_init_channels_rates(sc);
+ ath9k_init_misc(sc);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(ah);
+ sc->sc_ah = NULL;
+
+ return ret;
+}
+
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->rate_control_algorithm = "ath9k_rate_control";
+
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ if (error != 0)
+ goto error_init;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto error_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto error_tx;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto error_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto error_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto error_world;
+ }
+
+ INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
+ INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
+ sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+error_world:
+ ieee80211_unregister_hw(hw);
+error_register:
+ ath_rx_cleanup(sc);
+error_rx:
+ ath_tx_cleanup(sc);
+error_tx:
+ /* Nothing */
+error_regd:
+ ath9k_deinit_softc(sc);
+error_init:
+ return error;
+}
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ if ((sc->btcoex.no_stomp_timer) &&
+ sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ ath9k_exit_debug(sc->sc_ah);
+ ath9k_hw_deinit(sc->sc_ah);
+
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int i = 0;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (aphy == NULL)
+ continue;
+ sc->sec_wiphy[i] = NULL;
+ ieee80211_unregister_hw(aphy->hw);
+ ieee80211_free_hw(aphy->hw);
+ }
+ kfree(sc->sec_wiphy);
+
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memset(dd, 0, sizeof(*dd));
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ /* Register rate control algorithm */
+ error = ath_rate_control_register();
+ if (error != 0) {
+ printk(KERN_ERR
+ "ath9k: Unable to register rate control "
+ "algorithm: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = ath9k_debug_create_root();
+ if (error) {
+ printk(KERN_ERR
+ "ath9k: Unable to create debugfs root: %d\n",
+ error);
+ goto err_rate_unregister;
+ }
+
+ error = ath_pci_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k: No PCI devices found, driver not installed.\n");
+ error = -ENODEV;
+ goto err_remove_root;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+
+ err_remove_root:
+ ath9k_debug_remove_root();
+ err_rate_unregister:
+ ath_rate_control_unregister();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ ath_ahb_exit();
+ ath_pci_exit();
+ ath9k_debug_remove_root();
+ ath_rate_control_unregister();
+ printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 71b84d91dcff..efc420cd42bf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
wait = wait_time;
while (ath9k_hw_numtxpending(ah, q)) {
if ((--wait) == 0) {
- ath_print(common, ATH_DBG_QUEUE,
+ ath_print(common, ATH_DBG_FATAL,
"Failed to stop TX DMA in 100 "
"msec after killing last frame\n");
break;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0c87771383f0..29851e6376a9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -77,6 +77,9 @@
#define ATH9K_TXERR_XTXOP 0x08
#define ATH9K_TXERR_TIMER_EXPIRED 0x10
#define ATH9K_TX_ACKED 0x20
+#define ATH9K_TXERR_MASK \
+ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
+ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
#define ATH9K_TX_BA 0x01
#define ATH9K_TX_PWRMGMT 0x02
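
ATH9K_TXERR_MASK collects every fatal TX completion error into one constant so a caller can test the status word once instead of checking each bit. A hedged usage sketch; ts_status here stands for the TX status word the driver keeps in its descriptor status structure:

/* Illustrative only: single-test use of the new ATH9K_TXERR_MASK. */
static inline bool example_tx_frame_failed(u32 ts_status)
{
	return (ts_status & ATH9K_TXERR_MASK) != 0;
}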
@@ -164,6 +167,40 @@ struct ath_rx_status {
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_MAX = 37,
+};
+
struct ath_desc {
u32 ds_link;
u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48743452515..115e1aeedb59 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
static void ath_cache_conf_rate(struct ath_softc *sc,
struct ieee80211_conf *conf)
{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
return channel;
}
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -255,11 +143,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_enabled &&
- !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK)))
+ if (sc->ps_idle)
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ else if (sc->ps_enabled &&
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
@@ -316,7 +206,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
r = ath9k_hw_reset(ah, hchan, fastcc);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u Mhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
@@ -349,7 +239,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +253,6 @@ static void ath_ani_calibrate(unsigned long data)
short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
- /*
- * don't calibrate when we're scanning.
- * we are most likely not on our home channel.
- */
- spin_lock(&sc->ani_lock);
- if (sc->sc_flags & SC_OP_SCANNING)
- goto set_timer;
-
/* Only calibrate if awake */
if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
goto set_timer;
@@ -437,7 +319,6 @@ static void ath_ani_calibrate(unsigned long data)
ath9k_ps_restore(sc);
set_timer:
- spin_unlock(&sc->ani_lock);
/*
* Set timer interval based on previous results.
* The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +394,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +426,7 @@ static void ath9k_tasklet(unsigned long data)
*/
ath_print(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +527,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
}
chip_reset:
@@ -928,49 +809,12 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
clear_bit(key->hw_key_idx + 64, common->keymap);
if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
}
}
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 tx_streams, rx_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
-
- if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +836,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
* on the receipt of the first Beacon frame (i.e.,
* after time sync with the AP).
*/
- sc->sc_flags |= SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_BEACON_SYNC;
/* Configure the beacon */
ath_beacon_config(sc, vif);
@@ -1009,174 +853,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
}
}
-/********************************/
-/* LED functions */
-/********************************/
-
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
-static void ath_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-static void ath_deinit_leds(struct ath_softc *sc)
-{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-}
-
-static void ath_init_leds(struct ath_softc *sc)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
-}
-
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1194,7 +870,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) ",
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1249,7 +925,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1261,711 +937,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
- ah->rfkill_polarity;
-}
-
-static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- bool blocked = !!ath_is_rfkill_set(sc);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-static void ath_start_rfkill_poll(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(sc->hw->wiphy);
-}
-
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- BUG_ON(!ah);
-
- ath9k_exit_debug(ah);
- ath9k_hw_detach(ah);
- sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- int i = 0;
-
- ath9k_ps_wakeup(sc);
-
- dev_dbg(sc->dev, "Detach ATH hw\n");
-
- ath_deinit_leds(sc);
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
- ieee80211_unregister_hw(hw);
- ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
-
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
- if (!(sc->sc_flags & SC_OP_INVALID))
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- if ((sc->btcoex.no_stomp_timer) &&
- ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
- ath_clean_core(sc);
- ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_clean_core(sc);
- free_irq(sc->irq, sc);
- ath_bus_cleanup(common);
- kfree(sc->sec_wiphy);
- ieee80211_free_hw(sc->hw);
-
- ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
- return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
- btcoex->bt_priority_cnt++;
-
- if (time_after(jiffies, btcoex->bt_priority_time +
- msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
- } else {
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
- }
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- }
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
- enum ath_stomp_type stomp_type)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
- }
-
- ath9k_hw_btcoex_enable(ah);
-}
-
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 timer_next,
- u32 timer_period)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
-
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-/*
- * This is the master bt coex timer which runs for every
- * 45ms, bt traffic will be given priority during 55% of this
- * period while wlan gets remaining 45%
- */
-static void ath_btcoex_period_timer(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *) data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_detect_bt_priority(sc);
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- btcoex->btcoex_no_stomp),
- btcoex->btcoex_no_stomp * 10);
- btcoex->hw_timer_enabled = true;
- }
-
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
- struct ath_softc *sc = (struct ath_softc *)arg;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-}
-
-static int ath_init_btcoex_timer(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
- btcoex->btcoex_period / 100;
-
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
-
- spin_lock_init(&btcoex->btcoex_lock);
-
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- iowrite32(val, sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- u32 val;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- val = ioread32(sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- val = ioread32(sc->mem + reg_offset);
- return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
- .read = ath9k_ioread32,
- .write = ath9k_iowrite32,
-};
-
-/*
- * Initialize and fill ath_softc, ath_sofct is the
- * "Software Carrier" struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ath_hw *ah = NULL;
- struct ath_common *common;
- int r = 0, i;
- int csz = 0;
- int qnum;
-
- /* XXX: hardware will not be ready until ath_open() being called */
- sc->sc_flags |= SC_OP_INVALID;
-
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
- spin_lock_init(&sc->sc_serial_rw);
- spin_lock_init(&sc->ani_lock);
- spin_lock_init(&sc->sc_pm_lock);
- mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
- (unsigned long)sc);
-
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
- if (!ah)
- return -ENOMEM;
-
- ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
- sc->sc_ah = ah;
-
- common = ath9k_hw_common(ah);
- common->ops = &ath9k_common_ops;
- common->bus_ops = bus_ops;
- common->ah = ah;
- common->hw = sc->hw;
- common->priv = sc;
- common->debug_mask = ath9k_debug;
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- ath_read_cachesize(common, &csz);
- /* XXX assert csz is non-zero */
- common->cachelsz = csz << 2; /* convert to bytes */
-
- r = ath9k_hw_init(ah);
- if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", r);
- goto bad_free_hw;
- }
-
- if (ath9k_init_debug(ah) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto bad_free_hw;
- }
-
- /* Get the hardware key cache size. */
- common->keymax = ah->caps.keycache_size;
- if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
- common->keymax = ATH_KEYMAX;
- }
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath9k_hw_keyreset(ah, (u16) i);
-
- /* default to MONITOR mode */
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that the hal handles reseting
- * these queues at the needed time.
- */
- sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- r = -EIO;
- goto bad2;
- }
- sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- r = -EIO;
- goto bad2;
- }
-
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
- ath_cabq_update(sc);
-
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
- /* Setup data queues */
- /* NB: ensure BK queue is the lowest priority h/w queue */
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- /* Initializes the noise floor to a reasonable default value.
- * Later on this will be updated during ANI processing. */
-
- common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
- 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
- 1, NULL);
-
- sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- /* 11n Capabilities */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
-
- ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
- sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
- sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
- /* initialize beacon slots */
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
-
- /* setup channels and rates */
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-
- switch (ah->btcoex_hw.scheme) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- goto bad2;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- ath9k_hw_init_btcoex_hw(ah, qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-bad2:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
- ath9k_uninit_hw(sc);
- return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->queues = 4;
- hw->max_rates = 4;
- hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
- /* Hardware supports 10 but we use 4 */
- hw->max_rate_tries = 4;
- hw->sta_data_size = sizeof(struct ath_node);
- hw->vif_data_size = sizeof(struct ath_vif);
-
- hw->rate_control_algorithm = "ath9k_rate_control";
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_common *common;
- struct ath_hw *ah;
- int error = 0, i;
- struct ath_regulatory *reg;
-
- dev_dbg(sc->dev, "Attach ATH hw\n");
-
- error = ath_init_softc(devid, sc, subsysid, bus_ops);
- if (error != 0)
- return error;
-
- ah = sc->sc_ah;
- common = ath9k_hw_common(ah);
-
- /* get mac address from hardware and set in mac80211 */
-
- SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
- ath_set_hw_capab(sc, hw);
-
- error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
- ath9k_reg_notifier);
- if (error)
- return error;
-
- reg = &common->regulatory;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
-
- /* initialize tx/rx engine */
- error = ath_tx_init(sc, ATH_TXBUF);
- if (error != 0)
- goto error_attach;
-
- error = ath_rx_init(sc, ATH_RXBUF);
- if (error != 0)
- goto error_attach;
-
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
- error = ieee80211_register_hw(hw);
-
- if (!ath_is_world_regd(reg)) {
- error = regulatory_hint(hw->wiphy, reg->alpha2);
- if (error)
- goto error_attach;
- }
-
- /* Initialize LED control */
- ath_init_leds(sc);
-
- ath_start_rfkill_poll(sc);
-
- return 0;
-
-error_attach:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- ath9k_uninit_hw(sc);
-
- return error;
-}
-
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1973,6 +944,11 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ /* Stop ANI */
+ del_timer_sync(&common->ani.timer);
+
+ ieee80211_stop_queues(hw);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2014,126 +990,12 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
- return r;
-}
-
-/*
- * This function will allocate both the DMA descriptor structure, and the
- * buffers it contains. These are used to contain the descriptors used
- * by the system.
-*/
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head, const char *name,
- int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
- struct ath_buf *bf;
- int i, bsize, error;
-
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
-
- INIT_LIST_HEAD(head);
- /* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
- error = -ENOMEM;
- goto fail;
- }
-
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
- /*
- * Need additional DMA memory because we can't use
- * descriptors that cross the 4K page boundary. Assume
- * one skipped descriptor per 4K page.
- */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- u32 ndesc_skipped =
- ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
- u32 dma_len;
-
- while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
- dd->dd_desc_len += dma_len;
-
- ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
- }
-
- /* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
- ds = dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
- /* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
-
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += ndesc;
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
- }
- }
- list_add_tail(&bf->list, head);
- }
- return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
+ ieee80211_wake_queues(hw);
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
+ /* Start ANI */
+ ath_start_ani(common);
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
+ return r;
}
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
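The descriptor-DMA helper removed above hides one subtle piece of arithmetic: on hardware without the 4KB_SPLITTRANS capability a descriptor must not straddle a 4 KB page, so the allocation is grown by one descriptor per page and any descriptor that would start in the last 128 bytes of a page is skipped when the buffers are linked. A stand-alone sketch of that arithmetic, with an assumed descriptor size (the real one depends on the chip):

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for sizeof(struct ath_desc); illustrative only. */
#define DESC_SZ 80u

/* A descriptor starting in the last 0x80 bytes of a 4 KB page would be
 * fetched across the page boundary (32-dword fetch), which the engine
 * cannot do without ATH9K_HW_CAP_4KB_SPLITTRANS. */
static int crosses_4kb(uint32_t daddr)
{
	return (daddr & 0xFFF) > 0xF7F;
}

/* Grow the allocation by one descriptor per 4 KB page, then keep going
 * for the pages that the extra bytes themselves add, as the removed
 * helper's while-loop did. */
static uint32_t padded_len(uint32_t nbuf, uint32_t ndesc)
{
	uint32_t len = DESC_SZ * nbuf * ndesc;
	uint32_t skipped = len / 4096;

	while (skipped) {
		uint32_t extra = skipped * DESC_SZ;

		len += extra;
		skipped = extra / 4096;
	}
	return len;
}

int main(void)
{
	printf("512 buffers x 1 desc -> %u bytes\n", padded_len(512, 1));
	printf("daddr 0x1F80 crosses a page: %d\n", crosses_4kb(0x1F80));
	return 0;
}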
@@ -2214,28 +1076,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
/* mac80211 callbacks */
/**********************/
-/*
- * (Re)start btcoex timers
- */
-static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
-
- /* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-
- mod_timer(&btcoex->period_timer, jiffies);
-}
-
static int ath9k_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2405,11 +1245,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
ath_print(common, ATH_DBG_PS,
"Wake up to complete TX\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
* The actual restore operation will happen only after
@@ -2462,22 +1302,6 @@ exit:
return 0;
}
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
-}
-
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2508,6 +1332,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
return; /* another wiphy still in use */
}
+ /* Ensure HW is awake when we try to shut it down. */
+ ath9k_ps_wakeup(sc);
+
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +1355,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_ps_restore(sc);
+
+ /* Finally, put the chip in FULL SLEEP mode */
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
@@ -2538,12 +1368,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
@@ -2555,7 +1385,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
ic_opmode = NL80211_IFTYPE_STATION;
break;
@@ -2566,11 +1396,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ret = -ENOBUFS;
goto out;
}
- ic_opmode = conf->type;
+ ic_opmode = vif->type;
break;
default:
ath_print(common, ATH_DBG_FATAL,
- "Interface type %d not yet supported\n", conf->type);
+ "Interface type %d not yet supported\n", vif->type);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2602,18 +1432,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
*/
- if ((conf->type == NL80211_IFTYPE_STATION) ||
- (conf->type == NL80211_IFTYPE_ADHOC) ||
- (conf->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (conf->type == NL80211_IFTYPE_AP ||
- conf->type == NL80211_IFTYPE_ADHOC ||
- conf->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MONITOR)
ath_start_ani(common);
out:
@@ -2622,12 +1452,12 @@ out:
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2641,14 +1471,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ ath9k_ps_wakeup(sc);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- ath_beacon_return(sc, avp);
+ ath9k_ps_restore(sc);
}
+ ath_beacon_return(sc, avp);
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == conf->vif) {
+ if (sc->beacon.bslot[i] == vif) {
printk(KERN_DEBUG "%s: vif had allocated beacon "
"slot\n", __func__);
sc->beacon.bslot[i] = NULL;
@@ -2661,6 +1493,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
+void ath9k_enable_ps(struct ath_softc *sc)
+{
+ sc->ps_enabled = true;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+}
+
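The new ath9k_enable_ps() helper above centralizes the point where power save actually engages; both the IEEE80211_CONF_CHANGE_PS path and the nullfunc TX-completion path now call it, and the PS_ENABLED / PS_NULLFUNC_COMPLETED flags decide which of the two ends up doing so. A simplified stand-alone sketch of that handshake (flag values and helpers are illustrative, and the TIM-timer interrupt handling is omitted):

#include <stdio.h>

/* Illustrative flag values; the real ones live in ath9k.h. */
#define PS_ENABLED		0x01
#define PS_NULLFUNC_COMPLETED	0x02

static unsigned long ps_flags;

/* Sketch of ath9k_enable_ps(): only here does the chip stop receiving. */
static void enable_ps(void)
{
	printf("PS engaged, RX aborted\n");
}

/* mac80211 requests PS: remember it, and engage only if the nullfunc
 * frame was already ACKed. */
static void config_ps_on(void)
{
	ps_flags |= PS_ENABLED;
	if (ps_flags & PS_NULLFUNC_COMPLETED) {
		ps_flags &= ~PS_NULLFUNC_COMPLETED;
		enable_ps();
	}
}

/* TX completion sees the ACKed nullfunc: engage now, or note that it
 * completed so the config path can engage later. */
static void nullfunc_acked(void)
{
	if (ps_flags & PS_ENABLED)
		enable_ps();
	else
		ps_flags |= PS_NULLFUNC_COMPLETED;
}

int main(void)
{
	nullfunc_acked();	/* ACK arrives before the PS request */
	config_ps_on();		/* ...so the request engages immediately */
	return 0;
}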
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2687,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
all_wiphys_idle = ath9k_all_wiphys_idle(sc);
ath9k_set_wiphy_idle(aphy, idle);
- if (!idle && all_wiphys_idle)
- enable_radio = true;
+ enable_radio = (!idle && all_wiphys_idle);
/*
* After we unlock here its possible another wiphy
@@ -2699,6 +1543,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&sc->wiphy_lock);
if (enable_radio) {
+ sc->ps_idle = false;
ath_radio_enable(sc, hw);
ath_print(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
@@ -2713,36 +1558,27 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- sc->sc_flags |= SC_OP_PS_ENABLED;
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
- }
- }
+ sc->ps_flags |= PS_ENABLED;
/*
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
+ ath9k_enable_ps(sc);
}
} else {
sc->ps_enabled = false;
- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
- SC_OP_NULLFUNC_COMPLETED);
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
ath9k_setpower(sc, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2752,6 +1588,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -2787,8 +1631,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
skip_chan_change:
- if (changed & IEEE80211_CONF_CHANGE_POWER)
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
+ ath_update_txpow(sc);
+ }
spin_lock_bh(&sc->wiphy_lock);
disable_radio = ath9k_all_wiphys_idle(sc);
@@ -2796,6 +1642,7 @@ skip_chan_change:
if (disable_radio) {
ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+ sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -2836,24 +1683,28 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
"Set HW RX filter: 0x%x\n", rfilt);
}
-static void ath9k_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- switch (cmd) {
- case STA_NOTIFY_ADD:
- ath_node_attach(sc, sta);
- break;
- case STA_NOTIFY_REMOVE:
- ath_node_detach(sc, sta);
- break;
- default:
- break;
- }
+ ath_node_attach(sc, sta);
+
+ return 0;
+}
+
+static int ath9k_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+
+ ath_node_detach(sc, sta);
+
+ return 0;
}
static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -2952,6 +1803,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int slottime;
int error;
mutex_lock(&sc->mutex);
@@ -2987,6 +1839,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_beacon_config(sc, vif);
}
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ sc->beacon.slottime = slottime;
+ sc->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
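The BSS_CHANGED_ERP_SLOT block just added picks 9 us (short slot) or 20 us (long slot) and, in AP mode, defers the switch to the beacon path so associated stations change at the same time. A minimal sketch of that decision, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* ERP slot times in microseconds. */
enum { SLOT_SHORT_US = 9, SLOT_LONG_US = 20 };

struct beacon_state { int slottime; bool update_pending; };

static void erp_slot_changed(bool is_ap, bool use_short_slot,
			     struct beacon_state *bcn)
{
	int slottime = use_short_slot ? SLOT_SHORT_US : SLOT_LONG_US;

	if (is_ap) {
		/* deferred: applied together with the next beacon update */
		bcn->slottime = slottime;
		bcn->update_pending = true;
	} else {
		printf("program hw slot time now: %d us\n", slottime);
	}
}

int main(void)
{
	struct beacon_state bcn = { 0, false };

	erp_slot_changed(true, true, &bcn);
	printf("AP deferred %d us (pending=%d)\n",
	       bcn.slottime, bcn.update_pending);
	erp_slot_changed(false, false, &bcn);
	return 0;
}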
/* Disable transmission of beacons */
if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3091,15 +1962,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_start(sc, sta, tid, ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_resume(sc, sta, tid);
+ ath9k_ps_restore(sc);
break;
default:
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
@@ -3113,6 +1990,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
if (ath9k_wiphy_scanning(sc)) {
@@ -3128,10 +2006,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
aphy->state = ATH_WIPHY_SCAN;
ath9k_wiphy_pause_all_forced(sc, aphy);
-
- spin_lock_bh(&sc->ani_lock);
sc->sc_flags |= SC_OP_SCANNING;
- spin_unlock_bh(&sc->ani_lock);
+ del_timer_sync(&common->ani.timer);
+ cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
@@ -3139,17 +2016,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
- spin_lock_bh(&sc->ani_lock);
aphy->state = ATH_WIPHY_ACTIVE;
sc->sc_flags &= ~SC_OP_SCANNING;
sc->sc_flags |= SC_OP_FULL_RESET;
- spin_unlock_bh(&sc->ani_lock);
+ ath_start_ani(common);
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ath_beacon_config(sc, NULL);
mutex_unlock(&sc->mutex);
}
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+
+ mutex_lock(&sc->mutex);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
+ mutex_unlock(&sc->mutex);
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -3158,7 +2048,8 @@ struct ieee80211_ops ath9k_ops = {
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
.configure_filter = ath9k_configure_filter,
- .sta_notify = ath9k_sta_notify,
+ .sta_add = ath9k_sta_add,
+ .sta_remove = ath9k_sta_remove,
.conf_tx = ath9k_conf_tx,
.bss_info_changed = ath9k_bss_info_changed,
.set_key = ath9k_set_key,
@@ -3169,64 +2060,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
+ .set_coverage_class = ath9k_set_coverage_class,
};
-
-static int __init ath9k_init(void)
-{
- int error;
-
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- printk(KERN_ERR
- "ath9k: Unable to register rate control "
- "algorithm: %d\n",
- error);
- goto err_out;
- }
-
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
- error = ath_pci_init();
- if (error < 0) {
- printk(KERN_ERR
- "ath9k: No PCI devices found, driver not installed.\n");
- error = -ENODEV;
- goto err_remove_root;
- }
-
- error = ath_ahb_init();
- if (error < 0) {
- error = -ENODEV;
- goto err_pci_exit;
- }
-
- return 0;
-
- err_pci_exit:
- ath_pci_exit();
-
- err_remove_root:
- ath9k_debug_remove_root();
- err_rate_unregister:
- ath_rate_control_unregister();
- err_out:
- return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- ath_ahb_exit();
- ath_pci_exit();
- ath9k_debug_remove_root();
- ath_rate_control_unregister();
- printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
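The removed module init/exit pair is a textbook example of the kernel's register-in-order, unwind-in-reverse pattern: each successful step gets a goto label that a later failure jumps to, tearing things down in the opposite order. A generic stand-alone sketch of the pattern (not the driver's actual functions):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

int main(void)
{
	int err;

	err = step("rate control", 0);
	if (err)
		goto err_out;
	err = step("debugfs root", 0);
	if (err)
		goto err_rate;
	err = step("pci", 1);		/* simulate a failure here */
	if (err)
		goto err_debugfs;
	return 0;

err_debugfs:
	undo("debugfs root");
err_rate:
	undo("rate control");
err_out:
	return err;
}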
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 5321f735e5a0..9441c6718a30 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,13 +18,14 @@
#include <linux/pci.h>
#include "ath9k.h"
-static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ 0 }
@@ -49,16 +50,6 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
}
-static void ath_pci_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
-
- pci_iounmap(pdev, sc->mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
-}
-
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_hw *ah = (struct ath_hw *) common->ah;
@@ -96,9 +87,8 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
-const static struct ath_bus_ops ath_pci_bus_ops = {
+static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
- .cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
};
@@ -113,25 +103,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u16 subsysid;
u32 val;
int ret = 0;
- struct ath_hw *ah;
char hw_name[64];
if (pci_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
- goto bad;
+ goto err_dma;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
"DMA enable failed\n");
- goto bad;
+ goto err_dma;
}
/*
@@ -171,22 +158,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
ret = -ENODEV;
- goto bad;
+ goto err_region;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
printk(KERN_ERR "PCI memory map error\n") ;
ret = -EIO;
- goto bad1;
+ goto err_iomap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
- dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto bad2;
+ goto err_alloc_hw;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +188,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->dev = &pdev->dev;
sc->mem = mem;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
- goto bad3;
- }
-
- /* setup interrupt service routine */
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto bad4;
+ goto err_irq;
}
sc->irq = pdev->irq;
- ah = sc->sc_ah;
- ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
+ ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
printk(KERN_INFO
"%s: %s mem=0x%lx, irq=%d\n",
wiphy_name(hw->wiphy),
@@ -227,15 +214,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(unsigned long)mem, pdev->irq);
return 0;
-bad4:
- ath_detach(sc);
-bad3:
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
ieee80211_free_hw(hw);
-bad2:
+err_alloc_hw:
pci_iounmap(pdev, mem);
-bad1:
+err_iomap:
pci_release_region(pdev, 0);
-bad:
+err_region:
+ /* Nothing */
+err_dma:
pci_disable_device(pdev);
return ret;
}
@@ -245,8 +235,15 @@ static void ath_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
+
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
- ath_cleanup(sc);
+ pci_iounmap(pdev, mem);
+ pci_disable_device(pdev);
+ pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
index c3b59390fe38..2547b3c4a26c 100644
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -39,6 +39,8 @@
* AR9287 - 11n single-band 1x1 MIMO for USB
*/
+#include <linux/slab.h>
+
#include "hw.h"
/**
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 31de27dc0c4a..0999a495fd46 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -384,6 +384,9 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
#define AR_PHY_M_SLEEP 0x99f0
#define AR_PHY_REFCLKDLY 0x99f4
#define AR_PHY_REFCLKPD 0x99f8
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 70fdb9d8db82..244e1c629177 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -15,6 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/slab.h>
+
#include "ath9k.h"
static const struct ath_rate_table ar5416_11na_ratetable = {
@@ -668,7 +670,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix, nrix;
+ u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -678,48 +680,47 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* For Multi Rate Retry we use a different number of
* retry attempt counts. This ends up looking like this:
*
- * MRR[0] = 2
- * MRR[1] = 2
- * MRR[2] = 2
- * MRR[3] = 4
+ * MRR[0] = 4
+ * MRR[1] = 4
+ * MRR[2] = 4
+ * MRR[3] = 8
*
*/
- try_per_rate = sc->hw->max_rate_tries;
+ try_per_rate = 4;
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
- nrix = rix;
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, nrix, 0);
+ 1, rix, 0);
/* Get the next tried/allowed rate. No RTS for the next series
* after the probe rate
*/
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
/* Set the chosen rate. No RTS for first series entry. */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
}
/* Fill in the other rates for multirate retry */
for ( ; i < 4; i++) {
/* Use twice the number of tries for the last MRR segment. */
if (i + 1 == 4)
- try_per_rate = 4;
+ try_per_rate = 8;
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, nrix, 1);
+ try_per_rate, rix, 1);
}
/*
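The hunk above changes the multi-rate-retry layout described in the comment from 2/2/2/4 to 4/4/4/8 tries: four tries per series, doubled for the final fallback series, with the rate index stepping down each time and RTS enabled for everything after the first series. A toy sketch of how the four series get filled (the starting rate index is made up):

#include <stdio.h>

int main(void)
{
	int rix = 7;	/* pretend the rate control picked index 7 */
	int i;

	for (i = 0; i < 4; i++) {
		int tries = (i == 3) ? 8 : 4;	/* last segment: 2x tries */

		printf("MRR[%d]: rix=%d tries=%d rts=%s\n",
		       i, rix, tries, i ? "on" : "off");
		if (rix > 0)
			rix--;	/* stand-in for ath_rc_get_lower_rix() */
	}
	return 0;
}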
@@ -1324,7 +1325,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
+ u32 changed, enum nl80211_channel_type oper_chan_type)
{
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
@@ -1341,8 +1342,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
return;
- if (sc->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
- sc->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
+ if (oper_chan_type == NL80211_CHAN_HT40MINUS ||
+ oper_chan_type == NL80211_CHAN_HT40PLUS)
oper_cw40 = true;
oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f506998..4f6d6fd442f4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS) \
+ || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae69..1ca42e5148c8 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
return; /* not from our current AP */
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
- if (sc->sc_flags & SC_OP_BEACON_SYNC) {
- sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
ath_print(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on "
"timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
*/
ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"PS wait for CAB frames timed out\n");
}
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
- else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_action(hdr->frame_control)) &&
is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
- } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having received "
- "PS-Poll data (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "PS-Poll data (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
}
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
+ ath_debug_stat_rx(sc, bf);
+
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8e653fb937a1..72cfa8ebd9ae 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1547,9 +1547,9 @@ enum {
#define AR_BT_COEX_WEIGHT 0x8174
#define AR_BT_COEX_WGHT 0xff55
-#define AR_STOMP_ALL_WLAN_WGHT 0xffcc
-#define AR_STOMP_LOW_WLAN_WGHT 0xaaa8
-#define AR_STOMP_NONE_WLAN_WGHT 0xaa00
+#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
+#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
+#define AR_STOMP_NONE_WLAN_WGHT 0x0000
#define AR_BTCOEX_BT_WGHT 0x0000ffff
#define AR_BTCOEX_BT_WGHT_S 0
#define AR_BTCOEX_WL_WGHT 0xffff0000
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e7..00c0e21a4af7 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -14,6 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/slab.h>
+
#include "ath9k.h"
struct ath9k_vif_iter_data {
@@ -152,7 +154,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
SET_IEEE80211_PERM_ADDR(hw, addr);
- ath_set_hw_capab(sc, hw);
+ ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2a11cc57ceea..294b486bc3ed 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (npend) {
int r;
- ath_print(common, ATH_DBG_XMIT,
+ ath_print(common, ATH_DBG_FATAL,
"Unable to stop TxDMA. Reset HAL!\n");
spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
if (r)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d\n",
@@ -1353,25 +1353,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
return htype;
}
-static bool is_pae(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
-
- if (ieee80211_is_data(fc)) {
- if (ieee80211_is_nullfunc(fc) ||
- /* Port Access Entity (IEEE 802.1X) */
- (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- return true;
- }
- }
-
- return false;
-}
-
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1414,17 +1395,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
- *
- * If fragmentation is on, the sequence number is
- * not overridden, since it has been
- * incremented by the fragmentation routine.
- *
- * FIXME: check if the fragmentation threshold exceeds
- * IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
- hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
- IEEE80211_SEQ_SEQ_SHIFT);
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
@@ -1506,26 +1479,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
ctsrate |= rate->hw_value_short;
- /*
- * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
- * Check the first rate in the series to decide whether RTS/CTS
- * or CTS-to-self has to be used.
- */
- if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- flags = ATH9K_TXDESC_CTSENA;
- else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- flags = ATH9K_TXDESC_RTSENA;
-
- /* FIXME: Handle aggregation protection */
- if (sc->config.ath_aggr_prot &&
- (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
- flags = ATH9K_TXDESC_RTSENA;
- }
-
- /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
- flags &= ~(ATH9K_TXDESC_RTSENA);
-
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
int phy;
@@ -1537,8 +1490,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Tries = rates[i].count;
series[i].ChSel = common->tx_chainmask;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
+ (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_CTSENA;
+ }
+
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
series[i].RateFlags |= ATH9K_RATESERIES_2040;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1576,6 +1536,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
}
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+ flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (flags & ATH9K_TXDESC_RTSENA)
+ flags &= ~ATH9K_TXDESC_CTSENA;
+
/* set dur_update_en for l-sig computation except for PS-Poll frames */
ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
bf->bf_lastbf->bf_desc,
@@ -1623,7 +1591,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf) && !is_pae(skb))
+ if (conf_is_ht(&hw->conf))
bf->bf_state.bf_type |= BUF_HT;
bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1636,7 +1604,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
}
- if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
+ if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
+ (sc->sc_flags & SC_OP_TXAGGR))
assign_aggr_tid_seqno(skb, bf);
bf->bf_mpdu = skb;
@@ -1655,7 +1624,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
/* tag if this is a nullfunc frame to enable PS when AP acks it */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
bf->bf_isnullfunc = true;
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
} else
bf->bf_isnullfunc = false;
@@ -1780,7 +1749,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int padpos, padsize;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_tx_control txctl;
@@ -1792,7 +1762,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
* BSSes.
*/
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
sc->tx.seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1769,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* Add the padding after the header if this is not already done */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- if (hdrlen & 3) {
- padsize = hdrlen % 4;
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize) {
ath_print(common, ATH_DBG_XMIT,
"TX CABQ padding failed\n");
@@ -1810,7 +1779,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
skb_push(skb, padsize);
- memmove(skb->data, skb->data + padsize, hdrlen);
+ memmove(skb->data, skb->data + padsize, padpos);
}
txctl.txq = sc->beacon.cabq;
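The CABQ padding above now derives the pad position from the frame control field via ath9k_cmn_padpos() instead of the raw header length, and moves only the bytes up to that position. The pad itself is still the gap that lands the payload on a 4-byte boundary; since 802.11 header lengths are even, padpos & 3 is exactly that gap. A tiny sketch of the arithmetic:

#include <stdio.h>

/* Pad bytes inserted between header and payload so the payload starts
 * 4-byte aligned; for the even header lengths 802.11 uses, padpos & 3
 * is the required gap (0 or 2 bytes). */
static int pad_bytes(int padpos)
{
	return padpos & 3;
}

int main(void)
{
	int hdrs[] = { 24, 26, 30, 32 };	/* common 802.11 header sizes */
	unsigned int i;

	for (i = 0; i < sizeof(hdrs) / sizeof(hdrs[0]); i++)
		printf("padpos %2d -> pad %d byte(s)\n",
		       hdrs[i], pad_bytes(hdrs[i]));
	return 0;
}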
@@ -1838,7 +1807,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1853,26 +1823,26 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = hdrlen & 3;
- if (padsize && hdrlen >= 24) {
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos + padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
- memmove(skb->data + padsize, skb->data, hdrlen);
+ memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having "
- "received TX status (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "received TX status (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2059,11 +2029,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- } else
- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+ if ((sc->ps_flags & PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
/*
@@ -2078,7 +2047,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
+ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
@@ -2270,7 +2239,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
if (ATH_TXQ_SETUP(sc, i)) {
txq = &sc->tx.txq[i];
- spin_lock(&txq->axq_lock);
+ spin_lock_bh(&txq->axq_lock);
list_for_each_entry_safe(ac,
ac_tmp, &txq->axq_acq, list) {
@@ -2291,7 +2260,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
}
}
- spin_unlock(&txq->axq_lock);
+ spin_unlock_bh(&txq->axq_lock);
}
}
}
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index d6b685a06c5e..8263633c003c 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -65,11 +65,11 @@ enum ATH_DEBUG {
#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
#ifdef CONFIG_ATH_DEBUG
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
#else
-static inline void ath_print(struct ath_common *common,
- int dbg_mask,
- const char *fmt, ...)
+static inline void __attribute__ ((format (printf, 3, 4)))
+ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
{
}
#endif /* CONFIG_ATH_DEBUG */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 039ac490465c..00489c40be0c 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
-#include <linux/slab.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "regd.h"
@@ -110,8 +109,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
static inline bool is_wwr_sku(u16 regd)
{
- return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
- (regd == WORLD);
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
}
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750f..9ab1192004c0 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced3..0a00d42642cd 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,11 +78,11 @@ config B43_SDIO
If unsure, say N.
-# Data transfers to the device via PIO
-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
+# Data transfers to the device via PIO. We want it as a fallback even
+# if we can do DMA.
config B43_PIO
bool
- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
+ depends on B43
select SSB_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542dc..5e83b6f0a3a0 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
-b43-$(CONFIG_B43_PIO) += pio.o
+b43-y += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..b8807fb12c92 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
#define B43_MMIO_RNG 0x65A
+#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
#define B43_MMIO_POWERUP_DELAY 0x6A8
@@ -253,6 +254,14 @@ enum {
#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
+/* SHM_SHARED tx iq workarounds */
+#define B43_SHM_SH_NPHY_TXIQW0 0x0700
+#define B43_SHM_SH_NPHY_TXIQW1 0x0702
+#define B43_SHM_SH_NPHY_TXIQW2 0x0704
+#define B43_SHM_SH_NPHY_TXIQW3 0x0706
+/* SHM_SHARED tx pwr ctrl */
+#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
+#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
/* SHM_SCRATCH offsets */
#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -693,6 +702,7 @@ struct b43_wldev {
bool radio_hw_enable; /* saved state of radio hardware enabled state */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
+ bool use_pio; /* TRUE if next init should use PIO */
/* PHY/Radio device. */
struct b43_phy phy;
@@ -821,11 +831,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
-#endif /* CONFIG_B43_PIO */
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -876,20 +884,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
+# define B43_PIO_DEFAULT 1
#else
-# define B43_FORCE_PIO 0
+# define B43_PIO_DEFAULT 0
#endif
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be275e035..fa40fdfea719 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -38,6 +38,7 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <asm/div64.h>
@@ -383,160 +384,44 @@ static inline
}
}
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
- dma_addr_t addr, size_t size)
-{
- switch (ring->type) {
- case B43_DMA_30BIT:
- if ((u64)addr + size > (1ULL << 30))
- return 0;
- break;
- case B43_DMA_32BIT:
- if ((u64)addr + size > (1ULL << 32))
- return 0;
- break;
- case B43_DMA_64BIT:
- /* Currently we can't have addresses beyond
- * 64bit in the kernel. */
- break;
- }
- return 1;
-}
-
-#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
- dma_addr_t dmaaddr, size_t size)
-{
- ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
- free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size,
- gfp_t gfp_flags)
-{
- void *base;
-
- base = (void *)__get_free_pages(gfp_flags, get_order(size));
- if (!base)
- return NULL;
- memset(base, 0, size);
- *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
- DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
- free_pages((unsigned long)base, get_order(size));
- return NULL;
- }
-
- return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size)
-{
- void *base;
-
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- /* The memory does not fit our device constraints.
- * Retry with GFP_DMA set to get lower memory. */
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL | GFP_DMA);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "in the GFP_DMA region for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- b43err(ring->dev->wl, "Failed to allocate DMA "
- "ringmemory that fits device constraints\n");
- return NULL;
- }
- }
- /* We expect the memory to be 4k aligned, at least. */
- if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- return NULL;
- }
-
- return base;
-}
-
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- unsigned int required;
- void *base;
- dma_addr_t dmaaddr;
-
- /* There are several requirements to the descriptor ring memory:
- * - The memory region needs to fit the address constraints for the
- * device (same as for frame buffers).
- * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
- * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+ gfp_t flags = GFP_KERNEL;
+
+ /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+ * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
+ * has shown that 4K is sufficient for the latter as long as the buffer
+ * does not cross an 8K boundary.
+ *
+ * For unknown reasons - possibly a hardware error - the BCM4311 rev
+ * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
+ * which accounts for the GFP_DMA flag below.
+ *
+ * The flags here must match the flags in free_ringmemory below!
*/
-
if (ring->type == B43_DMA_64BIT)
- required = ring->nr_slots * sizeof(struct b43_dmadesc64);
- else
- required = ring->nr_slots * sizeof(struct b43_dmadesc32);
- if (B43_WARN_ON(required > 0x1000))
- return -ENOMEM;
-
- ring->alloc_descsize = 0x1000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
+ flags |= GFP_DMA;
+ ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+ B43_DMA_RINGMEMSIZE,
+ &(ring->dmabase), flags);
+ if (!ring->descbase) {
+ b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
- /* We're on <=32bit DMA, or we already got 8k aligned memory.
- * That's all we need, so we're fine. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
}
- b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
- /* Ok, we failed at the 8k alignment requirement.
- * Try to force-align the memory region now. */
- ring->alloc_descsize = 0x2000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
- return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if (is_8k_aligned(dmaaddr)) {
- /* We're already 8k aligned. That Ok, too. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
- }
- /* Force-align it to 8k */
- ring->descbase = (void *)((u8 *)base + 0x1000);
- ring->dmabase = dmaaddr + 0x1000;
- B43_WARN_ON(!is_8k_aligned(ring->dmabase));
+ memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
- b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
- ring->alloc_dmabase, ring->alloc_descsize);
+ gfp_t flags = GFP_KERNEL;
+
+ if (ring->type == B43_DMA_64BIT)
+ flags |= GFP_DMA;
+
+ ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase, flags);
}
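As the comment in alloc_ringmemory() stresses, the GFP flags used for the allocation and the free must stay in agreement. One way to keep the two call sites from drifting apart would be a tiny shared helper along these lines (an illustrative sketch; this helper does not exist in the driver):

	/* Sketch: compute the ring GFP flags in one place so alloc/free agree. */
	static gfp_t b43_dma_ring_gfp(struct b43_dmaring *ring)
	{
		gfp_t flags = GFP_KERNEL;

		/* BCM4311 rev 02 (64-bit DMA) needs the ring in low memory. */
		if (ring->type == B43_DMA_64BIT)
			flags |= GFP_DMA;
		return flags;
	}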
/* Reset the RX DMA channel */
@@ -646,14 +531,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
return 1;
- if (!b43_dma_address_ok(ring, addr, buffersize)) {
- /* We can't support this address. Unmap it again. */
- unmap_descbuffer(ring, addr, buffersize, dma_to_device);
- return 1;
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + buffersize > (1ULL << 30))
+ goto address_error;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + buffersize > (1ULL << 32))
+ goto address_error;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
}
/* The address is OK. */
return 0;
+
+address_error:
+ /* We can't support this address. Unmap it again. */
+ unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+ return 1;
}
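The switch above only rejects a mapping when its last byte would land outside the engine's addressable range; the bound is exclusive for the 30- and 32-bit engines, and no check is needed for 64-bit DMA. The same test as a standalone sketch (hypothetical helper, only meaningful for widths below 64 bits):

	/* Sketch: true if [addr, addr + len) is addressable with 'bits' DMA bits. */
	static bool b43_dma_range_ok(u64 addr, size_t len, unsigned int bits)
	{
		return addr + len <= (1ULL << bits);
	}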
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +615,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
return 0;
}
@@ -1354,9 +1251,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
}
/* Now transfer the whole frame. */
wmb();
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
ops->poke_tx(ring, next_slot(ring, slot));
return 0;
@@ -1476,7 +1370,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "DMA tx mapping failure\n");
goto out;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1607,22 +1500,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_dmaring *ring;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = select_ring_by_priority(dev, i);
-
- stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
- stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
- stats[i].count = ring->nr_tx_packets;
- }
-}
-
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
@@ -1760,7 +1637,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1794,4 +1670,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e607b392314c..dc91944d6022 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
} __attribute__ ((__packed__));
/* Misc DMA constants */
+#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
@@ -227,8 +228,6 @@ struct b43_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -246,12 +245,6 @@ struct b43_dmaring {
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
- /* Pointers and size of the originally allocated and mapped memory
- * region for the descriptor ring. */
- void *alloc_descbase;
- dma_addr_t alloc_dmabase;
- unsigned int alloc_descsize;
- /* Pointer to our wireless device. */
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */
@@ -283,9 +276,6 @@ void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 976104f634a1..94e4f1378fc3 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -34,6 +34,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/slab.h>
static struct b43_lo_calib *b43_find_lo_calib(struct b43_txpower_lo_control *lo,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..9a374ef83a22 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -42,6 +42,7 @@
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "b43.h"
@@ -67,7 +68,12 @@ MODULE_AUTHOR("Gábor Stefanik");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
-
+MODULE_FIRMWARE("b43/ucode11.fw");
+MODULE_FIRMWARE("b43/ucode13.fw");
+MODULE_FIRMWARE("b43/ucode14.fw");
+MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode5.fw");
+MODULE_FIRMWARE("b43/ucode9.fw");
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +108,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+int b43_modparam_pio = B43_PIO_DEFAULT;
+module_param_named(pio, b43_modparam_pio, int, 0644);
+MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +119,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -628,10 +638,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
- if (dev->phy.type != B43_PHYTYPE_G)
+ /* This test used to exit for all but a G PHY. */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
return;
- b43_write16(dev, 0x684, 510 + slot_time);
- b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
+ /* Shared memory location 0x0010 is the slot time and should be
+ * set to slot_time; however, this register is initially 0 and changing
+ * the value adversely affects the transmit rate for BCM4311
+	 * devices. Until this behavior is understood, this step is skipped
+ *
+ * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ */
}
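With the usual 802.11 slot values this register write is easy to sanity-check by hand (assuming the callers pass the standard 9 us short slot and 20 us long slot):

	/* short slot: b43_write16(dev, B43_MMIO_IFSSLOT, 510 + 9)  -> 519
	 * long slot:  b43_write16(dev, B43_MMIO_IFSSLOT, 510 + 20) -> 530 */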
static void b43_short_slot_timing_enable(struct b43_wldev *dev)
@@ -835,8 +852,10 @@ static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
}
static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -845,19 +864,19 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
if (B43_WARN_ON(!modparam_hwtkip))
return;
- mutex_lock(&wl->mutex);
-
+ /* This is only called from the RX path through mac80211, where
+ * our mutex is already locked. */
+ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
dev = wl->current_dev;
- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
+ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
rx_tkip_phase1_write(dev, index, iv32, phase1key);
- keymac_write(dev, index, addr);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
+ /* only pairwise TKIP keys are supported right now */
+ if (WARN_ON(!sta))
+ return;
+ keymac_write(dev, index, sta->addr);
}
static void do_key_write(struct b43_wldev *dev,
@@ -1786,8 +1805,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
"on your system. Please use PIO instead.\n");
- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
- "your kernel configuration.\n");
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = 1;
+ b43_controller_restart(dev, "DMA error");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -3338,27 +3358,6 @@ out_unlock:
return err;
}
-static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43_wl *wl = hw_to_b43_wl(hw);
- struct b43_wldev *dev;
- int err = -ENODEV;
-
- mutex_lock(&wl->mutex);
- dev = wl->current_dev;
- if (dev && b43_status(dev) >= B43_STAT_STARTED) {
- if (b43_using_pio_transfers(dev))
- b43_pio_get_tx_stats(dev, stats);
- else
- b43_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- mutex_unlock(&wl->mutex);
-
- return err;
-}
-
static int b43_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -3562,6 +3561,12 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
phy = &dev->phy;
+ if (conf_is_ht(conf))
+ phy->is_40mhz =
+ (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
+ else
+ phy->is_40mhz = false;
+
b43_mac_suspend(dev);
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
@@ -3963,6 +3968,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_STARTED);
/* Start data flow (TX/RX). */
@@ -4353,7 +4359,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ dev->use_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4372,8 +4378,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ieee80211_wake_queues(dev->wl->hw);
- ieee80211_wake_queues(dev->wl->hw);
-
b43_set_status(dev, B43_STAT_INITIALIZED);
out:
@@ -4388,7 +4392,7 @@ err_busdown:
}
static int b43_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -4396,24 +4400,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_MESH_POINT &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_MESH_POINT &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43dbg(wl, "Adding Interface type %d\n", conf->type);
+ b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
@@ -4428,17 +4432,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
}
static void b43_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- b43dbg(wl, "Removing Interface type %d\n", conf->type);
+ b43dbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43_WARN_ON(!wl->operating);
- B43_WARN_ON(wl->vif != conf->vif);
+ B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -4579,7 +4583,6 @@ static const struct ieee80211_ops b43_hw_ops = {
.set_key = b43_op_set_key,
.update_tkip_key = b43_op_update_tkip_key,
.get_stats = b43_op_get_stats,
- .get_tx_stats = b43_op_get_tx_stats,
.get_tsf = b43_op_get_tsf,
.set_tsf = b43_op_set_tsf,
.start = b43_op_start,
@@ -4823,6 +4826,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
if (!wldev)
goto out;
+ wldev->use_pio = b43_modparam_pio;
wldev->dev = dev;
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 984174bc7b0f..609e7051e018 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -24,6 +24,7 @@
#include "pcmcia.h"
#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index d90217c3a706..b6428ec16dd6 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -26,6 +26,8 @@
*/
+#include <linux/slab.h>
+
#include "b43.h"
#include "phy_a.h"
#include "phy_common.h"
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 75b26e175e8f..8f7d7eff2d80 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -421,3 +421,48 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
{
b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
+struct b43_c32 b43_cordic(int theta)
+{
+ u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
+ 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
+ 229, 115, 57, 29, };
+ u8 i;
+ s32 tmp;
+ s8 signx = 1;
+ u32 angle = 0;
+ struct b43_c32 ret = { .i = 39797, .q = 0, };
+
+ while (theta > (180 << 16))
+ theta -= (360 << 16);
+ while (theta < -(180 << 16))
+ theta += (360 << 16);
+
+ if (theta > (90 << 16)) {
+ theta -= (180 << 16);
+ signx = -1;
+ } else if (theta < -(90 << 16)) {
+ theta += (180 << 16);
+ signx = -1;
+ }
+
+ for (i = 0; i <= 17; i++) {
+ if (theta > angle) {
+ tmp = ret.i - (ret.q >> i);
+ ret.q += ret.i >> i;
+ ret.i = tmp;
+ angle += arctg[i];
+ } else {
+ tmp = ret.i + (ret.q >> i);
+ ret.q -= ret.i >> i;
+ ret.i = tmp;
+ angle -= arctg[i];
+ }
+ }
+
+ ret.i *= signx;
+ ret.q *= signx;
+
+ return ret;
+}
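b43_cordic() takes its angle in degrees scaled by 2^16 (note arctg[0] == 45 << 16) and starts from 39797, roughly 0.6073 * 2^16, which pre-compensates the CORDIC gain of about 1.647, so the result is approximately cos/sin scaled by 2^16. The CORDIC_CONVERT macro added to phy_common.h below then rounds such a Q16 product back to an integer. A small usage sketch with approximate values:

	struct b43_c32 v = b43_cordic(30 << 16);	/* theta = 30 degrees */
	/* v.i ~ 65536 * cos(30) ~ 56756,  v.q ~ 65536 * sin(30) ~ 32768 */
	/* CORDIC_CONVERT(98304) == 2: 98304 / 65536 = 1.5, ties round away from 0 */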
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9edd4e8e0c85..bd480b481bfc 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -5,6 +5,12 @@
struct b43_wldev;
+/* Complex number using 2 32-bit signed integers */
+struct b43_c32 { s32 i, q; };
+
+#define CORDIC_CONVERT(value) (((value) >= 0) ? \
+ ((((value) >> 15) + 1) >> 1) : \
+ -((((-(value)) >> 15) + 1) >> 1))
/* PHY register routing bits */
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
@@ -212,6 +218,9 @@ struct b43_phy {
bool supports_2ghz;
bool supports_5ghz;
+ /* HT info */
+ bool is_40mhz;
+
/* GMODE bit enabled? */
bool gmode;
@@ -418,5 +427,6 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
*/
void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 382826a8da82..29bf34ced865 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -33,6 +33,7 @@
#include "main.h"
#include <linux/bitrev.h>
+#include <linux/slab.h>
static const s8 b43_tssi2dbm_g_table[] = {
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff86..c6afe9d94590 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -23,6 +23,8 @@
*/
+#include <linux/slab.h>
+
#include "b43.h"
#include "main.h"
#include "phy_lp.h"
@@ -80,6 +82,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +104,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
maxpwr = bus->sprom.maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
cckpo = bus->sprom.cck2gpo;
+ /*
+ * We don't read SPROM's opo as specs say. On rev8 SPROMs
+ * opo == ofdm2gpo and we don't know any SSB with LP-PHY
+ * and SPROM rev below 8.
+ */
+ B43_WARN_ON(bus->sprom.revision < 8);
ofdmpo = bus->sprom.ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
@@ -1703,19 +1712,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
.c0 = 0,
};
-static u8 lpphy_nbits(s32 val)
-{
- u32 tmp = abs(val);
- u8 nbits = 0;
-
- while (tmp != 0) {
- nbits++;
- tmp >>= 1;
- }
-
- return nbits;
-}
-
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
struct lpphy_iq_est iq_est;
@@ -1742,8 +1738,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
goto out;
}
- prod_msb = lpphy_nbits(prod);
- q_msb = lpphy_nbits(qpwr);
+ prod_msb = fls(abs(prod));
+ q_msb = fls(abs(qpwr));
tmp1 = prod_msb - 20;
if (tmp1 >= 0) {
@@ -1773,47 +1769,6 @@ out:
return ret;
}
-/* Complex number using 2 32-bit signed integers */
-typedef struct {s32 i, q;} lpphy_c32;
-
-static lpphy_c32 lpphy_cordic(int theta)
-{
- u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
- 229, 115, 57, 29, };
- int i, tmp, signx = 1, angle = 0;
- lpphy_c32 ret = { .i = 39797, .q = 0, };
-
- theta = clamp_t(int, theta, -180, 180);
-
- if (theta > 90) {
- theta -= 180;
- signx = -1;
- } else if (theta < -90) {
- theta += 180;
- signx = -1;
- }
-
- for (i = 0; i <= 17; i++) {
- if (theta > angle) {
- tmp = ret.i - (ret.q >> i);
- ret.q += ret.i >> i;
- ret.i = tmp;
- angle += arctg[i];
- } else {
- tmp = ret.i + (ret.q >> i);
- ret.q -= ret.i >> i;
- ret.i = tmp;
- angle -= arctg[i];
- }
- }
-
- ret.i *= signx;
- ret.q *= signx;
-
- return ret;
-}
-
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
u16 wait)
{
@@ -1831,8 +1786,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 buf[64];
- int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
- lpphy_c32 sample;
+ int i, samples = 0, angle = 0;
+ int rotation = (((36 * freq) / 20) << 16) / 100;
+ struct b43_c32 sample;
lpphy->tx_tone_freq = freq;
@@ -1848,10 +1804,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
}
for (i = 0; i < samples; i++) {
- sample = lpphy_cordic(angle);
+ sample = b43_cordic(angle);
angle += rotation;
- buf[i] = ((sample.i * max) & 0xFF) << 8;
- buf[i] |= (sample.q * max) & 0xFF;
+ buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
+ buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
}
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
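Given the Q16-degree convention of b43_cordic(), the rotation term works out to 360 * freq / Fs degrees per sample; the constants suggest freq is in kHz and the sample clock is 20 Msps (an assumption read off the 20 and the 100 in the expression). A quick worked check:

	/* freq = 2500 (kHz):
	 *   rotation = (((36 * 2500) / 20) << 16) / 100 = (4500 << 16) / 100 = 45 << 16,
	 * i.e. 45 degrees per sample, which is a 2.5 MHz tone at 20 Msps. */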
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a78077..9c7cd282e46c 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -23,12 +23,56 @@
*/
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "b43.h"
#include "phy_n.h"
#include "tables_nphy.h"
+#include "main.h"
+struct nphy_txgains {
+ u16 txgm[2];
+ u16 pga[2];
+ u16 pad[2];
+ u16 ipa[2];
+};
+
+struct nphy_iqcal_params {
+ u16 txgm;
+ u16 pga;
+ u16 pad;
+ u16 ipa;
+ u16 cal_gain;
+ u16 ncorr[5];
+};
+
+struct nphy_iq_est {
+ s32 iq0_prod;
+ u32 i0_pwr;
+ u32 q0_pwr;
+ s32 iq1_prod;
+ u32 i1_pwr;
+ u32 q1_pwr;
+};
+
+enum b43_nphy_rf_sequence {
+ B43_RFSEQ_RX2TX,
+ B43_RFSEQ_TX2RX,
+ B43_RFSEQ_RESET2RX,
+ B43_RFSEQ_UPDATE_GAINH,
+ B43_RFSEQ_UPDATE_GAINL,
+ B43_RFSEQ_UPDATE_GAINU,
+};
+
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length);
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq);
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off);
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core);
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -197,173 +241,1020 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
~B43_NPHY_RFCTL_CMD_EN);
}
-#define ntab_upload(dev, offset, data) do { \
- unsigned int i; \
- for (i = 0; i < (offset##_SIZE); i++) \
- b43_ntab_write(dev, (offset) + i, (data)[i]); \
- } while (0)
-
-/* Upload the N-PHY tables. */
+/*
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
+ */
static void b43_nphy_tables_init(struct b43_wldev *dev)
{
- /* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-
- /* Volatile tables */
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
- ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
- ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
}
-static void b43_nphy_workarounds(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
+
+ if (dev->phy.rev >= 3) {
+ if (ipa) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
+{
+ u32 tmslow;
+
+ if (dev->phy.type != B43_PHYTYPE_N)
+ return;
+
+ tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
+ if (force)
+ tmslow |= SSB_TMSLOW_FGC;
+ else
+ tmslow &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ B43_WARN_ON(1);
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
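Stripped of the fixed-point scaling, the loop above computes a Q10 phase term a of roughly -1024 * iq/ii and a Q10 gain term b of roughly 1024 * (sqrt(qq/ii - (a/1024)^2) - 1). A floating-point reference of that relationship, usable only as a host-side sanity check (hypothetical helper, not driver code):

	#include <math.h>

	static void b43_iq_comp_reference(double iq, double ii, double qq,
					  double *a, double *b)
	{
		*a = -1024.0 * iq / ii;
		*b = 1024.0 * (sqrt(qq / ii - (*a / 1024.0) * (*a / 1024.0)) - 1.0);
	}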
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ int i;
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
+ for (i = 0; i < 4; i++)
+ array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
- unsigned int i;
+ struct b43_phy_n *nphy = phy->n;
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (1 /* FIXME band is 2.4GHz */) {
- b43_phy_set(dev, B43_NPHY_CLASSCTL,
- B43_NPHY_CLASSCTL_CCKEN);
- } else {
- b43_phy_mask(dev, B43_NPHY_CLASSCTL,
- ~B43_NPHY_CLASSCTL_CCKEN);
- }
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_phy_write(dev, B43_NPHY_TXFRAMEDELAY, 8);
-
- /* Fixup some tables */
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x800);
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- //TODO set RF sequence
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 66);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 66);
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- if (0 /*FIXME*/) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 9);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 9);
-
- /* Set gain backoff */
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT);
+ if (enable) {
+ u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
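The deaf_count reference count makes this nesting-safe: classifier and clip state are saved on the first enable and restored on the matching final disable, so every enable must be paired with a disable. Typical usage, mirroring the calibration paths further down (sketch):

	b43_nphy_stay_in_carrier_search(dev, true);
	/* ... table writes or forced RF sequences that must not be disturbed ... */
	b43_nphy_stay_in_carrier_search(dev, false);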
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ unsigned int channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ /* FIXME: channel = radio_chanspec */
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i, j;
+ u8 code;
+
+ /* TODO: for PHY >= 3
+ s8 *lna1_gain, *lna2_gain;
+ u8 *gain_db, *gain_bits;
+ u16 *rfseq_init;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+ */
+
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
+ 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
+ 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
/* Set HPVGA2 index */
b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
~B43_NPHY_C1_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
~B43_NPHY_C2_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+
+ /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
- //FIXME verify that the specs really mean to use autoinc here.
- for (i = 0; i < 3; i++)
- b43_ntab_write(dev, B43_NTAB16(7, 0x106) + i, 0x673);
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++)
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, 3 * j);
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
+ }
}
+}
- /* Set minimum gain value */
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN,
- ~B43_NPHY_C1_MINGAIN,
- 23 << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN,
- ~B43_NPHY_C2_MINGAIN,
- 23 << B43_NPHY_C2_MINGAIN_SHIFT);
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- if (phy->rev < 2) {
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ /* TODO: convert to b43_ntab_write? */
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (bus->sprom.boardflags2_lo & 0x100 &&
+ bus->boardinfo.type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_crtl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ (u16)~B43_NPHY_PIL_DW_64QAM);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
}
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
}
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
+static int b43_nphy_load_samples(struct b43_wldev *dev,
+ struct b43_c32 *samples, u16 len) {
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 i;
+ u32 *data;
+
+ data = kzalloc(len * sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ b43err(dev->wl, "allocation for samples loading failed\n");
+ return -ENOMEM;
+ }
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ for (i = 0; i < len; i++) {
+		data[i] = ((samples[i].i & 0x3FF) << 10);
+ data[i] |= samples[i].q & 0x3FF;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data);
+
+ kfree(data);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+ return 0;
+}
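Each entry of table 17 holds one complex sample packed into 20 bits, I in bits 19:10 and Q in bits 9:0. The same packing as a standalone sketch (hypothetical helper name):

	static inline u32 b43_nphy_pack_iq(s32 i, s32 q)
	{
		/* keep the low 10 bits of each component; I occupies bits 19:10 */
		return ((u32)(i & 0x3FF) << 10) | (u32)(q & 0x3FF);
	}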
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
+static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
+ bool test)
{
- u16 bbcfg;
+ int i;
+ u16 bw, len, rot, angle;
+ struct b43_c32 *samples;
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA);
- b43_phy_write(dev, B43_NPHY_BBCFG,
- bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC);
+
+ bw = (dev->phy.is_40mhz) ? 40 : 20;
+ len = bw << 3;
+
+ if (test) {
+ if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX)
+ bw = 82;
+ else
+ bw = 80;
+
+ if (dev->phy.is_40mhz)
+ bw <<= 1;
+
+ len = bw << 1;
+ }
+
+ samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ if (!samples) {
+ b43err(dev->wl, "allocation for samples generation failed\n");
+ return 0;
+ }
+ rot = (((freq * 36) / bw) << 16) / 100;
+ angle = 0;
+
+ for (i = 0; i < len; i++) {
+ samples[i] = b43_cordic(angle);
+ angle += rot;
+ samples[i].q = CORDIC_CONVERT(samples[i].q * max);
+ samples[i].i = CORDIC_CONVERT(samples[i].i * max);
+ }
+
+ i = b43_nphy_load_samples(dev, samples, len);
+ kfree(samples);
+ return (i < 0) ? 0 : len;
}
-enum b43_nphy_rf_sequence {
- B43_RFSEQ_RX2TX,
- B43_RFSEQ_TX2RX,
- B43_RFSEQ_RESET2RX,
- B43_RFSEQ_UPDATE_GAINH,
- B43_RFSEQ_UPDATE_GAINL,
- B43_RFSEQ_UPDATE_GAINU,
-};
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
+static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
+ u16 wait, bool iqmode, bool dac_test)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 seq_mode;
+ u32 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ if ((nphy->bb_mult_save & 0x80000000) == 0) {
+ tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
+ nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
+ }
+
+ if (!dev->phy.is_40mhz)
+ tmp = 0x6464;
+ else
+ tmp = 0x4747;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
+
+ if (loops != 0xFFFF)
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1));
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait);
+
+ seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER);
+ if (iqmode) {
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+ b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
+ } else {
+ if (dac_test)
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
+ }
+ for (i = 0; i < 100; i++) {
+ if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ b43err(dev->wl, "run samples timeout\n");
+
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
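The playback and override paths poll a status bit with the loop counter doubling as the timeout flag: i (or j) is zeroed on success, so a non-zero value after the loop means the hardware never reported the expected state. The same idiom in generic form (illustrative sketch; the helper name is made up):

	static bool b43_nphy_poll_bit_set(struct b43_wldev *dev, u16 reg, u16 bit,
					  int tries)
	{
		while (tries--) {
			if (b43_phy_read(dev, reg) & bit)
				return true;
			udelay(10);
		}
		return false;
	}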
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq)
{
@@ -376,6 +1267,7 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
[B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
};
int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
@@ -389,8 +1281,181 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
}
b43err(dev->wl, "RF sequence status timeout\n");
ok:
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER));
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << core) & i) != 0) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((core & (1 << i)) != 0)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
+{
+ u8 i, j;
+ u16 reg, tmp, val;
+
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
+
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
+
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
}
static void b43_nphy_bphy_init(struct b43_wldev *dev)
@@ -411,81 +1476,1680 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}
-/* RSSI Calibration */
-static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
+static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
+ s8 offset, u8 core, u8 rail, u8 type)
{
- //TODO
+ u16 tmp;
+ bool core1or5 = (core == 1) || (core == 5);
+ bool core2or5 = (core == 2) || (core == 5);
+
+ offset = clamp_val(offset, -32, 31);
+ tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
+
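+ /* Write the packed scale/offset word to the register selected by core, rail and type */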
+ if (core1or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
+ if (core1or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
+ if (core2or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
+ if (core2or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
+ if (core1or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
+ if (core1or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
+ if (core2or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
+ if (core2or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
+ if (core1or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
+ if (core1or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
+ if (core2or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
+ if (core2or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
+ if (core1or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
+ if (core1or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
+ if (core2or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
+ if (core2or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
+ if (core1or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
+ if (core1or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
+ if (core2or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
+ if (core2or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
+ if (core1or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
+ if (core2or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
+ if (core1or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
+ if (core2or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
+}
+
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ /* TODO use some definitions */
+ if (code == 0) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
+ 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ 0xFEC7, 0x0180);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xEFDC, (code << 1 | 0x1021));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ }
+}
+
+static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 reg, val;
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
+ } else {
+ for (i = 0; i < 2; i++) {
+ if ((code == 1 && i == 1) || (code == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
+ b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);
+
+ if (type < 3) {
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+ b43_phy_maskset(dev, reg, 0xFCFF, 0);
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_LUT_TRSW_UP1 :
+ B43_NPHY_RFCTL_LUT_TRSW_UP2;
+ b43_phy_maskset(dev, reg, 0xFFC3, 0);
+
+ if (type == 0)
+ val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ else if (type == 1)
+ val = 16;
+ else
+ val = 32;
+ b43_phy_set(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_TXF_40CO_B1S0 :
+ B43_NPHY_TXF_40CO_B32S1;
+ b43_phy_set(dev, reg, 0x0020);
+ } else {
+ if (type == 6)
+ val = 0x0100;
+ else if (type == 3)
+ val = 0x0200;
+ else
+ val = 0x0300;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+
+ b43_phy_maskset(dev, reg, 0xFCFF, val);
+ b43_phy_maskset(dev, reg, 0xF3FF, val << 2);
+
+ if (type != 3 && type != 6) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ))
+ val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ else
+ val = 0x11;
+ reg = (i == 0) ? 0x2000 : 0x3000;
+ reg |= B2055_PADDRV;
+ b43_radio_write16(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 :
+ B43_NPHY_AFECTL_OVER;
+ b43_phy_set(dev, reg, 0x0200);
+ }
+ }
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
+static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_rev3_rssi_select(dev, code, type);
+ else
+ b43_nphy_rev2_rssi_select(dev, code, type);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
+static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
+{
+ int i;
+ for (i = 0; i < 2; i++) {
+ if (type == 2) {
+ if (i == 0) {
+ b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
+ 0xFC, buf[0]);
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xFC, buf[1]);
+ } else {
+ b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
+ 0xFC, buf[2 * i]);
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xFC, buf[2 * i + 1]);
+ }
+ } else {
+ if (i == 0)
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xF3, buf[0] << 2);
+ else
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xF3, buf[2 * i + 1] << 2);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
+static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
+ u8 nsamp)
+{
+ int i;
+ int out;
+ u16 save_regs_phy[9];
+ u16 s[2];
+
+ if (dev->phy.rev >= 3) {
+ save_regs_phy[0] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1);
+ save_regs_phy[1] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP2);
+ save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
+ save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
+ }
+
+ b43_nphy_rssi_select(dev, 5, type);
+
+ if (dev->phy.rev < 2) {
+ save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
+ }
+
+ for (i = 0; i < 4; i++)
+ buf[i] = 0;
+
+ for (i = 0; i < nsamp; i++) {
+ if (dev->phy.rev < 2) {
+ s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
+ s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
+ } else {
+ s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
+ s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
+ }
+
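+ /* Each register holds two 6-bit signed samples; sign-extend them before accumulating */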
+ buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
+ buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
+ buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
+ buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
+ }
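+ /* Pack the four accumulated results into one byte each of the return value */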
+ out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
+ (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
+
+ if (dev->phy.rev < 2)
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
+ save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ save_regs_phy[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
+ }
+
+ return out;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
+static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+{
+ int i, j;
+ u8 state[4];
+ u8 code, val;
+ u16 class, override;
+ u8 regs_save_radio[2];
+ u16 regs_save_phy[2];
+ s8 offset[4];
+
+ u16 clip_state[2];
+ u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+ s32 results_min[4] = { };
+ u8 vcm_final[4] = { };
+ s32 results[4][4] = { };
+ s32 miniq[4][2] = { };
+
+ if (type == 2) {
+ code = 0;
+ val = 6;
+ } else if (type < 2) {
+ code = 25;
+ val = 4;
+ } else {
+ B43_WARN_ON(1);
+ return;
+ }
+
+ class = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 7, 4);
+ b43_nphy_read_clip_detection(dev, clip_state);
+ b43_nphy_write_clip_detection(dev, clip_off);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ override = 0x140;
+ else
+ override = 0x110;
+
+ regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
+
+ regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
+
+ state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
+ state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
+ b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
+ b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
+ state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
+ state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
+
+ b43_nphy_rssi_select(dev, 5, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
+
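+ /* Sweep the four possible VCM settings and poll the RSSI results for each */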
+ for (i = 0; i < 4; i++) {
+ u8 tmp[4];
+ for (j = 0; j < 4; j++)
+ tmp[j] = i;
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
+ b43_nphy_poll_rssi(dev, type, results[i], 8);
+ if (type < 2)
+ for (j = 0; j < 2; j++)
+ miniq[i][j] = min(results[i][2 * j],
+ results[i][2 * j + 1]);
+ }
+
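+ /* For each RSSI lane, pick the VCM that gets closest to the target reading */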
+ for (i = 0; i < 4; i++) {
+ s32 mind = 40;
+ u8 minvcm = 0;
+ s32 minpoll = 249;
+ s32 curr;
+ for (j = 0; j < 4; j++) {
+ if (type == 2)
+ curr = abs(results[j][i]);
+ else
+ curr = abs(miniq[j][i / 2] - code * 8);
+
+ if (curr < mind) {
+ mind = curr;
+ minvcm = j;
+ }
+
+ if (results[j][i] < minpoll)
+ minpoll = results[j][i];
+ }
+ results_min[i] = minpoll;
+ vcm_final[i] = minvcm;
+ }
+
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
+
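+ /* Turn the remaining error into a rounded offset and program it per core/rail */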
+ for (i = 0; i < 4; i++) {
+ offset[i] = (code * 8) - results[vcm_final[i]][i];
+
+ if (offset[i] < 0)
+ offset[i] = -((abs(offset[i]) + 4) / 8);
+ else
+ offset[i] = (offset[i] + 4) / 8;
+
+ if (results_min[i] == 248)
+ offset[i] = code - 32;
+
+ if (i % 2 == 0)
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
+ type);
+ else
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
+ type);
+ }
+
+ b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
+ b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
+
+ switch (state[2]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 1, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 1, 0);
+ break;
+ case 2:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ }
+
+ switch (state[3]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 2, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 2, 0);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 2, 1);
+ break;
+ }
+
+ b43_nphy_rssi_select(dev, 0, type);
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
+
+ b43_nphy_classifier(dev, 7, class);
+ b43_nphy_write_clip_detection(dev, clip_state);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+{
+ /* TODO */
+}
+
+/*
+ * RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
+ */
+static void b43_nphy_rssi_cal(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3) {
+ b43_nphy_rev3_rssi_cal(dev);
+ } else {
+ b43_nphy_rev2_rssi_cal(dev, 2);
+ b43_nphy_rev2_rssi_cal(dev, 0);
+ b43_nphy_rev2_rssi_cal(dev, 1);
+ }
}
+/*
+ * Restore RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
+ */
+static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 *rssical_radio_regs = NULL;
+ u16 *rssical_phy_regs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (!nphy->rssical_chanspec_2G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
+ } else {
+ if (!nphy->rssical_chanspec_5G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
+ }
+
+ /* TODO use some definitions */
+ b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
+ b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ /* TODO If the chip is 47162
+ return txpwrctrl_tx_gain_ipa_rev5 */
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
+static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 *save = nphy->tx_rx_cal_radio_saveregs;
+ u16 tmp;
+ u8 offset, i;
+
+ if (dev->phy.rev >= 3) {
+ for (i = 0; i < 2; i++) {
+ tmp = (i == 0) ? 0x2000 : 0x3000;
+ offset = i * 11;
+
+ save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL);
+ save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL);
+ save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS);
+ save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS);
+ save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS);
+ save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV);
+ save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1);
+ save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2);
+ save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL);
+ save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC);
+ save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ if (nphy->ipa5g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 4);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 1);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F);
+ }
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0);
+ if (nphy->ipa2g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 6);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2,
+ (dev->phy.rev < 5) ? 0x11 : 0x01);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ }
+ }
+ b43_radio_write16(dev, tmp | B2055_XOREGUL, 0);
+ b43_radio_write16(dev, tmp | B2055_XOMISC, 0);
+ b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0);
+ }
+ } else {
+ save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
+
+ save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
+
+ save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
+
+ save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
+
+ save[4] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
+ save[5] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
+
+ if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
+ B43_NPHY_BANDCTL_5GHZ)) {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
+ } else {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
+ }
+
+ if (dev->phy.rev < 2) {
+ b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
+ b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
+ } else {
+ b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
+ b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
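+ /* Find the table row matching the packed gain; fall back to the last row */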
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
+static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 scale, entry;
+
+ u16 tmp = nphy->txcal_bbmult;
+ if (core == 0)
+ tmp >>= 8;
+ tmp &= 0xff;
+
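+ /* Scale the LO and IQ ladders by the selected core's bbmult percentage */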
+ for (i = 0; i < 18; i++) {
+ scale = (ladder_lo[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i), entry);
+
+ scale = (ladder_iq[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
+static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i;
+ for (i = 0; i < 15; i++)
+ b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
+ tbl_tx_filter_coef_rev4[2][i]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
+static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i, j;
+ /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
+ u16 offset[] = { 0x186, 0x195, 0x2C5 };
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[i] + j),
+ tbl_tx_filter_coef_rev4[i][j]);
+
+ if (dev->phy.is_40mhz) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[3][j]);
+ } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[5][j]);
+ }
+
+ if (dev->phy.channel == 14)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[6][j]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
+static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 curr_gain[2];
+ struct nphy_txgains target;
+ const u32 *table = NULL;
+
+ if (nphy->txpwrctrl == 0) {
+ int i;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
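+ /* Decode ipa/pad/pga/txgm from the packed gain code; field widths differ by PHY rev */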
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ target.ipa[i] = curr_gain[i] & 0x000F;
+ target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
+ target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
+ target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
+ } else {
+ target.ipa[i] = curr_gain[i] & 0x0003;
+ target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
+ target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
+ target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
+ }
+ }
+ } else {
+ int i;
+ u16 index[2];
+ index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+ index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (dev->phy.rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ else if (dev->phy.rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0xF;
+ target.pad[i] = (table[index[i]] >> 20) & 0xF;
+ target.pga[i] = (table[index[i]] >> 24) & 0xF;
+ target.txgm[i] = (table[index[i]] >> 28) & 0xF;
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0x3;
+ target.pad[i] = (table[index[i]] >> 18) & 0x3;
+ target.pga[i] = (table[index[i]] >> 20) & 0x7;
+ target.txgm[i] = (table[index[i]] >> 23) & 0x7;
+ }
+ }
+ }
+
+ return target;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
+static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
+ b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
+ b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
+ b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+ b43_nphy_reset_cca(dev);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
+ b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
+static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+ u16 tmp;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ if (dev->phy.rev >= 3) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[3] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);
+
+ regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
+ regs[5] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 3), 0);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
+ regs[6] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
+ regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+
+ b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
+ b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
+ b43_nphy_rf_control_intc_override(dev, 1, 8, 2);
+
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
+ regs[3] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
+ regs[4] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ tmp = 0x0180;
+ else
+ tmp = 0x0120;
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
+static void b43_nphy_save_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+ u16 *txcal_radio_regs = NULL;
+ u8 *iqcal_chanspec;
+ u16 *table = NULL;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_2G;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ } else {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_5G;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ }
+
+ b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
+ txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
+ txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
+ txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
+ txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
+ } else {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
+ }
+ *iqcal_chanspec = nphy->radio_chanspec;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
+static void b43_nphy_restore_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 coef[4];
+ u16 *loft = NULL;
+ u16 *table = NULL;
+
+ int i;
+ u16 *txcal_radio_regs = NULL;
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (nphy->iqcal_chanspec_2G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ loft = &nphy->cal_cache.txcal_coeffs_2G[5];
+ } else {
+ if (nphy->iqcal_chanspec_5G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ loft = &nphy->cal_cache.txcal_coeffs_5G[5];
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+ table[i] = coef[i];
+ else
+ coef[i] = 0;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ } else {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ }
+
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
+ b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
+ b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
+ b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
+ b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
+ } else {
+ b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
+ }
+ b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
+static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
+ struct nphy_txgains target,
+ bool full, bool mphase)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ int error = 0;
+ int freq;
+ bool avoid = false;
+ u8 length;
+ u16 tmp, core, type, count, max, numb, last = 0, cmd;
+ const u16 *table;
+ bool phy6or5x;
+
+ u16 buffer[11];
+ u16 diq_start = 0;
+ u16 save[2];
+ u16 gain[2];
+ struct nphy_iqcal_params params[2];
+ bool updated[2] = { };
+
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ if (dev->phy.rev >= 4) {
+ avoid = nphy->hang_avoid;
+ nphy->hang_avoid = 0;
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
+ gain[i] = params[i].cal_gain;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
+
+ b43_nphy_tx_cal_radio_setup(dev);
+ b43_nphy_tx_cal_phy_setup(dev);
+
+ phy6or5x = dev->phy.rev >= 6 ||
+ (dev->phy.rev == 5 && nphy->ipa2g_on &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ if (phy6or5x) {
+ if (dev->phy.is_40mhz) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_40);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_40);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_20);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_20);
+ }
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+
+ if (!dev->phy.is_40mhz)
+ freq = 2500;
+ else
+ freq = 5000;
+
+ if (nphy->mphase_cal_phase_id > 2)
+ b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
+ 0xFFFF, 0, true, false);
+ else
+ error = b43_nphy_tx_tone(dev, freq, 250, true, false);
+
+ if (error == 0) {
+ if (nphy->mphase_cal_phase_id > 2) {
+ table = nphy->mphase_txcal_bestcoeffs;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ if (!full && nphy->txiqlocal_coeffsvalid) {
+ table = nphy->txiqlocal_bestc;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ full = true;
+ if (dev->phy.rev >= 3) {
+ table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
+ } else {
+ table = tbl_tx_iqlo_cal_startcoefs;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
+ }
+ }
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
+
+ if (full) {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
+ } else {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
+ }
+
+ if (mphase) {
+ count = nphy->mphase_txcal_cmdidx;
+ numb = min(max,
+ (u16)(count + nphy->mphase_txcal_numcmds));
+ } else {
+ count = 0;
+ numb = max;
+ }
+
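+ /* Execute the calibration commands, polling each one for completion */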
+ for (; count < numb; count++) {
+ if (full) {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
+ } else {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_recal[count];
+ }
+
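+ /* Bits 12-13 of the command select the core, bits 8-11 the calibration type */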
+ core = (cmd & 0x3000) >> 12;
+ type = (cmd & 0x0F00) >> 8;
+
+ if (phy6or5x && updated[core] == 0) {
+ b43_nphy_update_tx_cal_ladder(dev, core);
+ updated[core] = 1;
+ }
+
+ tmp = (params[core].ncorr[type] << 8) | 0x66;
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
+
+ if (type == 1 || type == 3 || type == 4) {
+ buffer[0] = b43_ntab_read(dev,
+ B43_NTAB16(15, 69 + core));
+ diq_start = buffer[0];
+ buffer[0] = 0;
+ b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
+ 0);
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
+ for (i = 0; i < 2000; i++) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
+ if (tmp & 0xC000)
+ break;
+ udelay(10);
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
+ buffer);
+
+ if (type == 1 || type == 3 || type == 4)
+ buffer[0] = diq_start;
+ }
+
+ if (mphase)
+ nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
+
+ last = (dev->phy.rev < 3) ? 6 : 7;
+
+ if (!mphase || nphy->mphase_cal_phase_id == last) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
+ if (dev->phy.rev < 3) {
+ buffer[0] = 0;
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ buffer);
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->txiqlocal_bestc);
+ nphy->txiqlocal_coeffsvalid = true;
+ /* TODO: Set nphy->txiqlocal_chanspec to
+ the current channel */
+ } else {
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->mphase_txcal_bestcoeffs);
+ }
+
+ b43_nphy_stop_playback(dev);
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
+ }
+
+ b43_nphy_tx_cal_phy_cleanup(dev);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (dev->phy.rev >= 4)
+ nphy->hang_avoid = avoid;
+
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ return error;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
+static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 buffer[7];
+ bool equal = true;
+
+ if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ return;
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+ for (i = 0; i < 4; i++) {
+ if (buffer[i] != nphy->txiqlocal_bestc[i]) {
+ equal = false;
+ break;
+ }
+ }
+
+ if (!equal) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
+ nphy->txiqlocal_bestc);
+ for (i = 0; i < 4; i++)
+ buffer[i] = 0;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ &nphy->txiqlocal_bestc[5]);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ &nphy->txiqlocal_bestc[5]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
+static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j, index;
+ u16 rfctl[2];
+ u16 afectl_core;
+ u16 tmp[6];
+ u16 cur_hpf1, cur_hpf2, cur_lna;
+ u32 real, imag;
+ enum ieee80211_band band;
+
+ u8 use;
+ u16 cur_hpf;
+ u16 lna[3] = { 3, 3, 1 };
+ u16 hpf1[3] = { 7, 2, 0 };
+ u16 hpf2[3] = { 2, 0, 0 };
+ u32 power[3] = { };
+ u16 gain_save[2];
+ u16 cal_gain[2];
+ struct nphy_iqcal_params cal_params[2];
+ struct nphy_iq_est est;
+ int ret = 0;
+ bool playtone = true;
+ int desired = 13;
+
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_reapply_tx_cal_coeffs(dev);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
+ cal_gain[i] = cal_params[i].cal_gain;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ rfctl[0] = B43_NPHY_RFCTL_INTC1;
+ rfctl[1] = B43_NPHY_RFCTL_INTC2;
+ afectl_core = B43_NPHY_AFECTL_C1;
+ } else {
+ rfctl[0] = B43_NPHY_RFCTL_INTC2;
+ rfctl[1] = B43_NPHY_RFCTL_INTC1;
+ afectl_core = B43_NPHY_AFECTL_C2;
+ }
+
+ tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ tmp[2] = b43_phy_read(dev, afectl_core);
+ tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ tmp[4] = b43_phy_read(dev, rfctl[0]);
+ tmp[5] = b43_phy_read(dev, rfctl[1]);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ (1 - i));
+ b43_phy_set(dev, afectl_core, 0x0006);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
+
+ band = b43_current_band(dev->wl);
+
+ if (nphy->rxcalparams & 0xFF000000) {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x140);
+ else
+ b43_phy_write(dev, rfctl[0], 0x110);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x180);
+ else
+ b43_phy_write(dev, rfctl[0], 0x120);
+ }
+
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[1], 0x148);
+ else
+ b43_phy_write(dev, rfctl[1], 0x114);
+
+ if (nphy->rxcalparams & 0x10000) {
+ b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
+ (i + 1));
+ b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
+ (2 - i));
+ }
+
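+ /* Try three preset LNA/HPF gains, then a refined setting based on measured power */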
+ for (j = 0; j < 4; j++) {
+ if (j < 3) {
+ cur_lna = lna[j];
+ cur_hpf1 = hpf1[j];
+ cur_hpf2 = hpf2[j];
+ } else {
+ if (power[1] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 2;
+ } else {
+ if (power[0] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 1;
+ } else {
+ index = 0;
+ use = 2;
+ cur_hpf = cur_hpf2;
+ }
+ }
+ cur_lna = lna[index];
+ cur_hpf1 = hpf1[index];
+ cur_hpf2 = hpf2[index];
+ cur_hpf += desired - hweight32(power[index]);
+ cur_hpf = clamp_val(cur_hpf, 0, 10);
+ if (use == 1)
+ cur_hpf1 = cur_hpf;
+ else
+ cur_hpf2 = cur_hpf;
+ }
+
+ tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
+ (cur_lna << 2));
+ b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
+ false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_stop_playback(dev);
+
+ if (playtone) {
+ ret = b43_nphy_tx_tone(dev, 4000,
+ (nphy->rxcalparams & 0xFFFF),
+ false, false);
+ playtone = false;
+ } else {
+ b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
+ false, false);
+ }
+
+ if (ret == 0) {
+ if (j < 3) {
+ b43_nphy_rx_iq_est(dev, &est, 1024, 32,
+ false);
+ if (i == 0) {
+ real = est.i0_pwr;
+ imag = est.q0_pwr;
+ } else {
+ real = est.i1_pwr;
+ imag = est.q1_pwr;
+ }
+ power[i] = ((real + imag) / 1024) + 1;
+ } else {
+ b43_nphy_calc_rx_iq_comp(dev, 1 << i);
+ }
+ b43_nphy_stop_playback(dev);
+ }
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
+ b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
+ b43_phy_write(dev, rfctl[1], tmp[5]);
+ b43_phy_write(dev, rfctl[0], tmp[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
+ b43_phy_write(dev, afectl_core, tmp[2]);
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+
+ b43_nphy_stay_in_carrier_search(dev, 0);
+
+ return ret;
+}
+
+static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ return -1;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
+static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ if (dev->phy.rev >= 3)
+ return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
+ else
+ return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
+}
+
+/*
+ * Init N-PHY
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ */
int b43_phy_initn(struct b43_wldev *dev)
{
+ struct ssb_bus *bus = dev->dev->bus;
struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+ u8 tx_pwr_state;
+ struct nphy_txgains target;
u16 tmp;
+ enum ieee80211_band tmp2;
+ bool do_rssi_cal;
- //TODO: Spectral management
+ u16 clip[2];
+ bool do_cal = false;
+
+ if ((dev->phy.rev >= 3) &&
+ (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
+ }
+ nphy->deaf_count = 0;
b43_nphy_tables_init(dev);
+ nphy->crsminpwr_adjusted = false;
+ nphy->noisevars_adjusted = false;
/* Clear all overrides */
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ }
b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ if (dev->phy.rev < 6) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ }
b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
~(B43_NPHY_RFSEQMODE_CAOVER |
B43_NPHY_RFSEQMODE_TROVER));
+ if (dev->phy.rev >= 3)
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
- tmp = (phy->rev < 2) ? 64 : 59;
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE,
- tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
-
+ if (dev->phy.rev <= 2) {
+ tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE,
+ tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
+ }
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- b43_phy_write(dev, B43_NPHY_TXREALFD, 184);
- b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200);
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80);
- b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511);
+ if (bus->sprom.boardflags2_lo & 0x100 ||
+ (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
+ bus->boardinfo.type == 0x8B))
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
+ else
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
+ b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
+ b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
- //TODO MIMO-Config
- //TODO Update TX/RX chain
+ b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+ b43_nphy_update_txrx_chain(dev);
if (phy->rev < 2) {
b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
}
+
+ tmp2 = b43_current_band(dev->wl);
+ if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
+ b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
+ nphy->papd_epsilon_offset[0] << 7);
+ b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
+ nphy->papd_epsilon_offset[1] << 7);
+ b43_nphy_int_pa_set_tx_dig_filters(dev);
+ } else if (phy->rev >= 5) {
+ b43_nphy_ext_pa_set_tx_dig_filters(dev);
+ }
+
b43_nphy_workarounds(dev);
- b43_nphy_reset_cca(dev);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN);
+ /* Reset CCA; the init sequence differs slightly from the standard reset */
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+
+ /* TODO N PHY MAC PHY Clock Set with argument 1 */
+
+ b43_nphy_pa_override(dev, false);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_pa_override(dev, true);
+
+ b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_read_clip_detection(dev, clip);
+ tx_pwr_state = nphy->txpwrctrl;
+ /* TODO N PHY TX power control with argument 0
+ (turning off power control) */
+ /* TODO Fix the TX Power Settings */
+ /* TODO N PHY TX Power Control Idle TSSI */
+ /* TODO N PHY TX Power Control Setup */
+
+ if (phy->rev >= 3) {
+ /* TODO */
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ }
+
+ if (nphy->phyrxchain != 3)
+ ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
+ if (nphy->mphase_cal_phase_id > 0)
+ ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+
+ do_rssi_cal = false;
+ if (phy->rev >= 3) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ else
+ do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+
+ if (do_rssi_cal)
+ b43_nphy_rssi_cal(dev);
+ else
+ b43_nphy_restore_rssi_cal(dev);
+ } else {
+ b43_nphy_rssi_cal(dev);
+ }
+
+ if (!(nphy->measure_hold & 0x6)) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_cal = (nphy->iqcal_chanspec_2G == 0);
+ else
+ do_cal = (nphy->iqcal_chanspec_5G == 0);
+
+ if (nphy->mute)
+ do_cal = false;
+
+ if (do_cal) {
+ target = b43_nphy_get_tx_gains(dev);
+
+ if (nphy->antsel_type == 2)
+ ;/*TODO NPHY Superswitch Init with argument 1*/
+ if (nphy->perical != 2) {
+ b43_nphy_rssi_cal(dev);
+ if (phy->rev >= 3) {
+ nphy->cal_orig_pwr_idx[0] =
+ nphy->txpwrindex[0].index_internal;
+ nphy->cal_orig_pwr_idx[1] =
+ nphy->txpwrindex[1].index_internal;
+ /* TODO N PHY Pre Calibrate TX Gain */
+ target = b43_nphy_get_tx_gains(dev);
+ }
+ }
+ }
+ }
+
+ if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
+ if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
+ b43_nphy_save_cal(dev);
+ else if (nphy->mphase_cal_phase_id == 0)
+ ;/* N PHY Periodic Calibration with argument 3 */
+ } else {
+ b43_nphy_restore_cal(dev);
+ }
- b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */
- //TODO read core1/2 clip1 thres regs
-
- if (1 /* FIXME Band is 2.4GHz */)
- b43_nphy_bphy_init(dev);
- //TODO disable TX power control
- //TODO Fix the TX power settings
- //TODO Init periodic calibration with reason 3
- b43_nphy_rssi_cal(dev, 2);
- b43_nphy_rssi_cal(dev, 0);
- b43_nphy_rssi_cal(dev, 1);
- //TODO get TX gain
- //TODO init superswitch
- //TODO calibrate LO
- //TODO idle TSSI TX pctl
- //TODO TX power control power setup
- //TODO table writes
- //TODO TX power control coefficients
- //TODO enable TX power control
- //TODO control antenna selection
- //TODO init radar detection
- //TODO reset channel if changed
+ b43_nphy_tx_pwr_ctrl_coef_setup(dev);
+ /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
+ b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
+ b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
+ if (phy->rev >= 3 && phy->rev <= 6)
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_nphy_tx_lp_fbw(dev);
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147d..403aad3f894f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
+#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
+#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
+#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
@@ -919,8 +924,99 @@
struct b43_wldev;
+struct b43_phy_n_iq_comp {
+ s16 a0;
+ s16 b0;
+ s16 a1;
+ s16 b1;
+};
+
+struct b43_phy_n_rssical_cache {
+ u16 rssical_radio_regs_2G[2];
+ u16 rssical_phy_regs_2G[12];
+
+ u16 rssical_radio_regs_5G[2];
+ u16 rssical_phy_regs_5G[12];
+};
+
+struct b43_phy_n_cal_cache {
+ u16 txcal_radio_regs_2G[8];
+ u16 txcal_coeffs_2G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_2G;
+
+ u16 txcal_radio_regs_5G[8];
+ u16 txcal_coeffs_5G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_5G;
+};
+
+struct b43_phy_n_txpwrindex {
+ s8 index;
+ s8 index_internal;
+ s8 index_internal_save;
+ u16 AfectrlOverride;
+ u16 AfeCtrlDacGain;
+ u16 rad_gain;
+ u8 bbmult;
+ u16 iqcomp_a;
+ u16 iqcomp_b;
+ u16 locomp;
+};
+
struct b43_phy_n {
- //TODO lots of missing stuff
+ u8 antsel_type;
+ u8 cal_orig_pwr_idx[2];
+ u8 measure_hold;
+ u8 phyrxchain;
+ u8 perical;
+ u32 deaf_count;
+ u32 rxcalparams;
+ bool hang_avoid;
+ bool mute;
+ u16 papd_epsilon_offset[2];
+ s32 preamble_override;
+ u32 bb_mult_save;
+ u16 radio_chanspec;
+
+ bool gain_boost;
+ bool elna_gain_config;
+ bool band5g_pwrgain;
+
+ u8 mphase_cal_phase_id;
+ u16 mphase_txcal_cmdidx;
+ u16 mphase_txcal_numcmds;
+ u16 mphase_txcal_bestcoeffs[11];
+
+ u8 txpwrctrl;
+ u16 txcal_bbmult;
+ u16 txiqlocal_bestc[11];
+ bool txiqlocal_coeffsvalid;
+ struct b43_phy_n_txpwrindex txpwrindex[2];
+
+ u8 txrx_chain;
+ u16 tx_rx_cal_phy_saveregs[11];
+ u16 tx_rx_cal_radio_saveregs[22];
+
+ u16 rfctrl_intc1_save;
+ u16 rfctrl_intc2_save;
+
+ u16 classifier_state;
+ u16 clip_state[2];
+
+ bool aband_spurwar_en;
+ bool gband_spurwar_en;
+
+ bool ipa2g_on;
+ u8 iqcal_chanspec_2G;
+ u8 rssical_chanspec_2G;
+
+ bool ipa5g_on;
+ u8 iqcal_chanspec_5G;
+ u8 rssical_chanspec_5G;
+
+ struct b43_phy_n_rssical_cache rssical_cache;
+ struct b43_phy_n_cal_cache cal_cache;
+ bool crsminpwr_adjusted;
+ bool noisevars_adjusted;
};
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index c01b8e02412f..aa12273ae716 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/slab.h>
static u16 generate_cookie(struct b43_pio_txqueue *q,
@@ -559,7 +560,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "PIO transmission failure\n");
goto out;
}
- q->nr_tx_packets++;
B43_WARN_ON(q->buffer_used > q->buffer_size);
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
@@ -605,22 +605,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_pio_txqueue *q;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- q = select_queue_by_priority(dev, i);
-
- stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
- stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
- stats[i].count = q->nr_tx_packets;
- }
-}
-
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9ddad..1e516147424f 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -92,9 +90,6 @@ struct b43_pio_txqueue {
struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
struct list_head packets_list;
- /* Total number of transmitted packets. */
- unsigned int nr_tx_packets;
-
/* Shortcut to the 802.11 core revision. This is to
* avoid horrible pointer dereferencing in the fastpaths. */
u8 rev;
@@ -162,49 +157,9 @@ void b43_pio_free(struct b43_wldev *dev);
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
void b43_pio_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 0d3ac64147a5..4e56b7bbcebd 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -16,6 +16,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/slab.h>
#include <linux/ssb/ssb.h>
#include "sdio.h"
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e2336315545..a00d509150f7 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
}
-const u8 b43_ntab_adjustpower0[] = {
+static const u8 b43_ntab_adjustpower0[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u8 b43_ntab_adjustpower1[] = {
+static const u8 b43_ntab_adjustpower1[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u16 b43_ntab_bdi[] = {
+static const u16 b43_ntab_bdi[] = {
0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
};
-const u32 b43_ntab_channelest[] = {
+static const u32 b43_ntab_channelest[] = {
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
0x10101010, 0x10101010, 0x10101010, 0x10101010,
};
-const u8 b43_ntab_estimatepowerlt0[] = {
+static const u8 b43_ntab_estimatepowerlt0[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_estimatepowerlt1[] = {
+static const u8 b43_ntab_estimatepowerlt1[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_framelookup[] = {
+static const u8 b43_ntab_framelookup[] = {
0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
};
-const u32 b43_ntab_framestruct[] = {
+static const u32 b43_ntab_framestruct[] = {
0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
0x09804506, 0x00100030, 0x09804507, 0x00100030,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_gainctl0[] = {
+static const u32 b43_ntab_gainctl0[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_gainctl1[] = {
+static const u32 b43_ntab_gainctl1[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_intlevel[] = {
+static const u32 b43_ntab_intlevel[] = {
0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
0x00C1188D, 0x080024D2, 0x00000070,
};
-const u32 b43_ntab_iqlt0[] = {
+static const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u32 b43_ntab_iqlt1[] = {
+static const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u16 b43_ntab_loftlt0[] = {
+static const u16 b43_ntab_loftlt0[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
0x0002, 0x0103,
};
-const u16 b43_ntab_loftlt1[] = {
+static const u16 b43_ntab_loftlt1[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
0x0002, 0x0103,
};
-const u8 b43_ntab_mcs[] = {
+static const u8 b43_ntab_mcs[] = {
0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-const u32 b43_ntab_noisevar10[] = {
+static const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u32 b43_ntab_noisevar11[] = {
+static const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u16 b43_ntab_pilot[] = {
+static const u16 b43_ntab_pilot[] = {
0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
};
-const u32 b43_ntab_pilotlt[] = {
+static const u32 b43_ntab_pilotlt[] = {
0x76540123, 0x62407351, 0x76543201, 0x76540213,
0x76540123, 0x76430521,
};
-const u32 b43_ntab_tdi20a0[] = {
+static const u32 b43_ntab_tdi20a0[] = {
0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi20a1[] = {
+static const u32 b43_ntab_tdi20a1[] = {
0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a0[] = {
+static const u32 b43_ntab_tdi40a0[] = {
0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a1[] = {
+static const u32 b43_ntab_tdi40a1[] = {
0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdtrn[] = {
+static const u32 b43_ntab_tdtrn[] = {
0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
};
-const u32 b43_ntab_tmap[] = {
+static const u32 b43_ntab_tmap[] = {
0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,544 @@ const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+const u32 b43_ntab_tx_gain_rev0_1_2[] = {
+ 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
+ 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
+ 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
+ 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
+ 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
+ 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
+ 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
+ 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
+ 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
+ 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
+ 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
+ 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
+ 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
+ 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
+ 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
+ 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
+ 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
+ 0x03902942, 0x03902844, 0x03902842, 0x03902744,
+ 0x03902742, 0x03902644, 0x03902642, 0x03902544,
+ 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
+ 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
+ 0x03802842, 0x03802744, 0x03802742, 0x03802644,
+ 0x03802642, 0x03802544, 0x03802542, 0x03802444,
+ 0x03802442, 0x03802344, 0x03802342, 0x03802244,
+ 0x03802242, 0x03802144, 0x03802142, 0x03802044,
+ 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
+ 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
+ 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
+ 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
+ 0x03801842, 0x03801744, 0x03801742, 0x03801644,
+ 0x03801642, 0x03801544, 0x03801542, 0x03801444,
+ 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
+};
+
+const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+ 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
+ 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
+ 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
+ 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
+ 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
+ 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
+ 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
+ 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
+ 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
+ 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
+ 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
+ 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
+ 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
+ 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
+ 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
+ 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
+ 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
+ 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
+ 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
+ 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
+ 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
+ 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
+ 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
+ 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
+ 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
+ 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
+ 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
+ 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
+ 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
+ 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
+ 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
+ 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
+};
+
+const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+ 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
+ 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
+ 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
+ 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
+ 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
+ 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
+ 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
+ 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
+ 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
+ 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
+ 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
+ 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
+ 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
+ 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
+ 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
+ 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
+ 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
+ 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
+ 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
+ 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
+ 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
+ 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
+ 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
+ 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
+ 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
+ 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
+ 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
+ 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
+ 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
+ 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
+ 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
+ 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
+};
+
+const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+ 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
+ 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
+ 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
+ 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
+ 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
+ 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
+ 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
+ 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
+ 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
+ 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
+ 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
+ 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
+ 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
+ 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
+ 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
+ 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
+ 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
+ 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
+ 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
+ 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
+ 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
+ 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
+ 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
+ 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
+ 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
+ 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
+ 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
+ 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
+ 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
+ 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
+ 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
+ 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
+};
+
+const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+ 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
+ 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
+ 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
+ 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
+ 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
+ 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
+ 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
+ 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
+ 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
+ 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
+ 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
+ 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
+ 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
+ 0x09620039, 0x09620037, 0x09620035, 0x09620033,
+ 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
+ 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
+ 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
+ 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
+ 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
+ 0x06620039, 0x06620037, 0x06620035, 0x06620033,
+ 0x05620046, 0x05620044, 0x05620042, 0x05620040,
+ 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
+ 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
+ 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
+ 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
+ 0x03620038, 0x03620037, 0x03620035, 0x03620033,
+ 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
+ 0x02620046, 0x02620044, 0x02620043, 0x02620042,
+ 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
+ 0x01620043, 0x01620042, 0x01620041, 0x01620040,
+ 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
+ 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
+};
+
+const u32 txpwrctrl_tx_gain_ipa[] = {
+ 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
+ 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
+ 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
+ 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
+ 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
+ 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
+ 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
+ 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
+ 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
+ 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
+ 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
+ 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
+ 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
+ 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
+ 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
+ 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
+ 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
+ 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
+ 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
+ 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
+ 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
+ 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
+ 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
+ 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
+ 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
+ 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
+ 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
+ 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
+ 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
+ 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
+ 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
+ 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+ 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
+ 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
+ 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
+ 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
+ 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
+ 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
+ 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
+ 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
+ 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
+ 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
+ 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
+ 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
+ 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
+ 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
+ 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
+ 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
+ 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
+ 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
+ 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
+ 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
+ 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
+ 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
+ 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
+ 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
+ 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
+ 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
+ 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
+ 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
+ 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
+ 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
+ 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
+ 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+ 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
+ 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
+ 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
+ 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
+ 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
+ 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
+ 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
+ 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
+ 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
+ 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
+ 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
+ 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
+ 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
+ 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
+ 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
+ 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
+ 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
+ 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
+ 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
+ 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
+ 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
+ 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
+ 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
+ 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
+ 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
+ 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
+ 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
+ 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
+ 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
+ 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
+ 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
+ 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+ 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
+ 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
+ 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
+ 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
+ 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
+ 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
+ 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
+ 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
+ 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
+ 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
+ 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
+ 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
+ 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
+ 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
+ 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
+ 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
+ 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
+ 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
+ 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
+ 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
+ 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
+ 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
+ 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
+ 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
+ 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
+ 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
+ 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
+ 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
+ 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
+ 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
+ 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
+ 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
+};
+
+const u16 tbl_iqcal_gainparams[2][9][8] = {
+ {
+ { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
+ { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
+ { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
+ { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
+ { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
+ { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
+ { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
+ { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
+ { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
+ },
+ {
+ { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
+ { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
+ { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
+ }
+};
+
+const struct nphy_txiqcal_ladder ladder_lo[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 25, 1 },
+ { 25, 2 },
+ { 25, 3 },
+ { 25, 4 },
+ { 25, 5 },
+ { 25, 6 },
+ { 25, 7 },
+ { 35, 7 },
+ { 50, 7 },
+ { 71, 7 },
+ { 100, 7 }
+};
+
+const struct nphy_txiqcal_ladder ladder_iq[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 35, 0 },
+ { 50, 0 },
+ { 71, 0 },
+ { 100, 0 },
+ { 100, 1 },
+ { 100, 2 },
+ { 100, 3 },
+ { 100, 4 },
+ { 100, 5 },
+ { 100, 6 },
+ { 100, 7 }
+};
+
+const u16 loscale[] = {
+ 256, 256, 271, 271,
+ 287, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 406, 406, 430,
+ 430, 455, 455, 482,
+ 482, 511, 511, 541,
+ 541, 573, 573, 607,
+ 607, 643, 643, 681,
+ 681, 722, 722, 764,
+ 764, 810, 810, 858,
+ 858, 908, 908, 962,
+ 962, 1019, 1019, 256
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 0x0200, 0x0300, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1201,
+ 0x1202, 0x1203, 0x1204, 0x1205,
+ 0x1206, 0x1207, 0x1907, 0x2307,
+ 0x3207, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 0x0300, 0x0500, 0x0700, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x1901,
+ 0x1902, 0x1903, 0x1904, 0x1905,
+ 0x1906, 0x1907, 0x2407, 0x3207,
+ 0x4607, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 0x0100, 0x0200, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1900,
+ 0x2300, 0x3200, 0x4700, 0x4701,
+ 0x4702, 0x4703, 0x4704, 0x4705,
+ 0x4706, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 0x0200, 0x0300, 0x0600, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x2400,
+ 0x3200, 0x4600, 0x6400, 0x6401,
+ 0x6402, 0x6403, 0x6404, 0x6405,
+ 0x6406, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
+
+const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
+
+const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 0x8423, 0x8323, 0x8073, 0x8256,
+ 0x8045, 0x8223, 0x9423, 0x9323,
+ 0x9073, 0x9256, 0x9045, 0x9223
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 0x8101, 0x8253, 0x8053, 0x8234,
+ 0x8034, 0x9101, 0x9253, 0x9053,
+ 0x9234, 0x9034
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 0x8123, 0x8264, 0x8086, 0x8245,
+ 0x8056, 0x9123, 0x9264, 0x9086,
+ 0x9245, 0x9056
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 0x8434, 0x8334, 0x8084, 0x8267,
+ 0x8056, 0x8234, 0x9434, 0x9334,
+ 0x9084, 0x9267, 0x9056, 0x9234
+};
+
+const s16 tbl_tx_filter_coef_rev4[7][15] = {
+ { -377, 137, -407, 208, -1527,
+ 956, 93, 186, 93, 230,
+ -44, 230, 20, -191, 201 },
+ { -77, 20, -98, 49, -93,
+ 60, 56, 111, 56, 26,
+ -5, 26, 34, -32, 34 },
+ { -360, 164, -376, 164, -1533,
+ 576, 308, -314, 308, 121,
+ -73, 121, 91, 124, 91 },
+ { -295, 200, -363, 142, -1391,
+ 826, 151, 301, 151, 151,
+ 301, 151, 602, -752, 602 },
+ { -92, 58, -96, 49, -104,
+ 44, 17, 35, 17, 12,
+ 25, 12, 13, 27, 13 },
+ { -375, 136, -399, 209, -1479,
+ 949, 130, 260, 130, 230,
+ -44, 230, 201, -191, 201 },
+ { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91,
+ 0x33a, 0x97, 0x12d, 0x97, 0x97,
+ 0x12d, 0x97, 0x25a, 0xd10, 0x25a }
+};
+
+/* addr0, addr1, bmask, shift */
+const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = {
+ { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */
+ { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */
+ { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */
+ { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */
+ { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */
+ { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */
+ { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */
+ { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */
+ { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */
+ { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */
+ { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */
+ { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */
+ { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */
+ { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */
+};
+
+/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */
+const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
+ { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */
+ { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
+ { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
+ { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
+ { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
+ { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
+ { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
+ { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
+ { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
+ { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
+ { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
+ { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
+ { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */
+ { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */
+ { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
+};
+
static inline void assert_ntab_array_sizes(void)
{
#undef check
@@ -2442,6 +2980,72 @@ static inline void assert_ntab_array_sizes(void)
#undef check
}
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_NTAB_8BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_NTAB_16BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ case B43_NTAB_32BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ value <<= 16;
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_NTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_NTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ *((u32 *)data) <<= 16;
+ *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
u32 type;
@@ -2474,3 +3078,91 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
/* Some compiletime assertions... */
assert_ntab_array_sizes();
}
+
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+#define ntab_upload(dev, offset, data) do { \
+ unsigned int i; \
+ for (i = 0; i < (offset##_SIZE); i++) \
+ b43_ntab_write(dev, (offset) + i, (data)[i]); \
+ } while (0)
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+
+ /* Volatile tables */
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
+ ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
+ ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+}
+
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ /* TODO */
+
+ /* Volatile tables */
+ /* TODO */
+}
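As a usage note: each B43_NTAB_* offset already carries the element width in its upper bits (the header builds them with macros such as B43_NTAB16()), so callers of the accessors above never program B43_NPHY_TABLE_ADDR or the DATALO/DATAHI registers directly. A minimal sketch against one of the tables uploaded by b43_nphy_rev0_1_2_tables_init(); the wrapper function and the idea of snapshotting the table are illustrative only, while the table, size, and accessor names are taken from this patch:

/* Assumes tables_nphy.h. Snapshot and restore the core-1 LO feed-through
 * table; it is a 16-bit table with B43_NTAB_C1_LOFEEDTH_SIZE (128) entries. */
static void nphy_loft_snapshot_example(struct b43_wldev *dev)
{
	u16 loft[B43_NTAB_C1_LOFEEDTH_SIZE];

	b43_ntab_read_bulk(dev, B43_NTAB_C1_LOFEEDTH,
			   B43_NTAB_C1_LOFEEDTH_SIZE, loft);
	/* ...adjust coefficients here... */
	b43_ntab_write_bulk(dev, B43_NTAB_C1_LOFEEDTH,
			    B43_NTAB_C1_LOFEEDTH_SIZE, loft);

	/* Single elements work the same way; the offset encodes both the
	 * table and the element width: */
	b43_ntab_write(dev, B43_NTAB16(0x1B, 0x1C0),
		       b43_ntab_read(dev, B43_NTAB16(0x1B, 0x1C0)));
}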
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec7..9c1c6ecd3672 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,27 @@ struct b43_nphy_channeltab_entry {
struct b43_wldev;
+struct nphy_txiqcal_ladder {
+ u8 percent;
+ u8 g_env;
+};
+
+struct nphy_rf_control_override_rev2 {
+ u8 addr0;
+ u8 addr1;
+ u16 bmask;
+ u8 shift;
+};
+
+struct nphy_rf_control_override_rev3 {
+ u16 val_mask;
+ u8 val_shift;
+ u8 en_addr0;
+ u8 val_addr0;
+ u8 en_addr1;
+ u8 val_addr1;
+};
+
/* Upload the default register value table.
* If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
* table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +147,57 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
+
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset);
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
-
-extern const u8 b43_ntab_adjustpower0[];
-extern const u8 b43_ntab_adjustpower1[];
-extern const u16 b43_ntab_bdi[];
-extern const u32 b43_ntab_channelest[];
-extern const u8 b43_ntab_estimatepowerlt0[];
-extern const u8 b43_ntab_estimatepowerlt1[];
-extern const u8 b43_ntab_framelookup[];
-extern const u32 b43_ntab_framestruct[];
-extern const u32 b43_ntab_gainctl0[];
-extern const u32 b43_ntab_gainctl1[];
-extern const u32 b43_ntab_intlevel[];
-extern const u32 b43_ntab_iqlt0[];
-extern const u32 b43_ntab_iqlt1[];
-extern const u16 b43_ntab_loftlt0[];
-extern const u16 b43_ntab_loftlt1[];
-extern const u8 b43_ntab_mcs[];
-extern const u32 b43_ntab_noisevar10[];
-extern const u32 b43_ntab_noisevar11[];
-extern const u16 b43_ntab_pilot[];
-extern const u32 b43_ntab_pilotlt[];
-extern const u32 b43_ntab_tdi20a0[];
-extern const u32 b43_ntab_tdi20a1[];
-extern const u32 b43_ntab_tdi40a0[];
-extern const u32 b43_ntab_tdi40a1[];
-extern const u32 b43_ntab_tdtrn[];
-extern const u32 b43_ntab_tmap[];
-
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
+
+extern const u32 b43_ntab_tx_gain_rev0_1_2[];
+extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
+extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
+
+extern const u32 txpwrctrl_tx_gain_ipa[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
+extern const u32 txpwrctrl_tx_gain_ipa_5g[];
+extern const u16 tbl_iqcal_gainparams[2][9][8];
+extern const struct nphy_txiqcal_ladder ladder_lo[];
+extern const struct nphy_txiqcal_ladder ladder_iq[];
+extern const u16 loscale[];
+
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
+extern const s16 tbl_tx_filter_coef_rev4[7][15];
+
+extern const struct nphy_rf_control_override_rev2
+ tbl_rf_control_override_rev2[];
+extern const struct nphy_rf_control_override_rev3
+ tbl_rf_control_override_rev3[];
#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 0a86bdf53154..e91520d0312e 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <net/dst.h>
/* 32bit DMA ops. */
@@ -1411,7 +1412,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1527,25 +1527,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
spin_unlock(&ring->lock);
}
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43legacy_dmaring *ring;
- unsigned long flags;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = priority_to_txring(dev, i);
-
- spin_lock_irqsave(&ring->lock, flags);
- stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
- stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
- stats[i].count = ring->nr_tx_packets;
- spin_unlock_irqrestore(&ring->lock, flags);
- }
-}
-
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2f186003c31e..f9681041c2d8 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -243,8 +243,6 @@ struct b43legacy_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -292,9 +290,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
@@ -315,11 +310,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
}
static inline
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h
index 82167a90088f..9ff6750dc57f 100644
--- a/drivers/net/wireless/b43legacy/leds.h
+++ b/drivers/net/wireless/b43legacy/leds.h
@@ -45,7 +45,7 @@ enum b43legacy_led_behaviour {
void b43legacy_leds_init(struct b43legacy_wldev *dev);
void b43legacy_leds_exit(struct b43legacy_wldev *dev);
-#else /* CONFIG_B43EGACY_LEDS */
+#else /* CONFIG_B43LEGACY_LEDS */
/* LED support disabled */
struct b43legacy_led {
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886b..bb2dd9329aa0 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -40,6 +40,7 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <net/dst.h>
#include <asm/unaligned.h>
@@ -61,6 +62,8 @@ MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
+MODULE_FIRMWARE("b43legacy/ucode2.fw");
+MODULE_FIRMWARE("b43legacy/ucode4.fw");
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
@@ -2444,29 +2447,6 @@ static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
-static int b43legacy_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- unsigned long flags;
- int err = -ENODEV;
-
- if (!dev)
- goto out;
- spin_lock_irqsave(&wl->irq_lock, flags);
- if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) {
- if (b43legacy_using_pio(dev))
- b43legacy_pio_get_tx_stats(dev, stats);
- else
- b43legacy_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
-out:
- return err;
-}
-
static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2921,6 +2901,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
goto out;
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
@@ -3341,6 +3322,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
@@ -3361,7 +3343,7 @@ err_kfree_lo_control:
}
static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -3370,23 +3352,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43legacydbg(wl, "Adding Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3403,18 +3385,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
}
static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
- b43legacydbg(wl, "Removing Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(!wl->operating);
- B43legacy_WARN_ON(wl->vif != conf->vif);
+ B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -3509,7 +3491,6 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.bss_info_changed = b43legacy_op_bss_info_changed,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
- .get_tx_stats = b43legacy_op_get_tx_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
@@ -3960,7 +3941,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
static void b43legacy_print_driverinfo(void)
{
- const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
+ const char *feat_pci = "", *feat_leds = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3950,6 @@ static void b43legacy_print_driverinfo(void)
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
-#ifdef CONFIG_B43LEGACY_RFKILL
- feat_rfkill = "R";
-#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
@@ -3979,9 +3957,9 @@ static void b43legacy_print_driverinfo(void)
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s, Firmware-ID: "
B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
- feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
+ feat_pci, feat_leds, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index aaf227203a98..35033dd342ce 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "b43legacy.h"
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 51866c9a2769..b033b0ed4ca0 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -29,6 +29,7 @@
#include "xmit.h"
#include <linux/delay.h>
+#include <linux/slab.h>
static void tx_start(struct b43legacy_pioqueue *queue)
@@ -477,7 +478,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
list_move_tail(&packet->list, &queue->txqueue);
queue->nr_txfree--;
- queue->nr_tx_packets++;
B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
tasklet_schedule(&queue->txtask);
@@ -546,18 +546,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
tasklet_schedule(&queue->txtask);
}
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_pio *pio = &dev->pio;
- struct b43legacy_pioqueue *queue;
-
- queue = pio->queue1;
- stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
- stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
- stats[0].count = queue->nr_tx_packets;
-}
-
static void pio_rx_error(struct b43legacy_pioqueue *queue,
int clear_buffers,
const char *error)
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 464fec05a06d..8e6773ea6e75 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -74,10 +74,6 @@ struct b43legacy_pioqueue {
* posted to the device. We are waiting for the txstatus.
*/
struct list_head txrunning;
- /* Total number or packets sent.
- * (This counter can obviously wrap).
- */
- unsigned int nr_tx_packets;
struct tasklet_struct txtask;
struct b43legacy_pio_txpacket
tx_packets_cache[B43legacy_PIO_MAXTXPACKETS];
@@ -106,8 +102,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status);
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
/* Suspend TX queue in hardware. */
@@ -140,11 +134,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
{
}
static inline
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
}
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 3816df96a663..f4c56121d387 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,4 +1,5 @@
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <net/lib80211.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 90108b698f11..c34a3b7f1292 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,3 +1,5 @@
+#include <linux/slab.h>
+
#include "hostap_80211.h"
#include "hostap_common.h"
#include "hostap_wlan.h"
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index a2a203c90ba3..7e72ac1de49b 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include "hostap_wlan.h"
#include "hostap.h"
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c9640a3e02c9..a36501dbbe02 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
+#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
@@ -794,13 +795,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID123(
- "Intersil", "PRISM 2_5 PCMCIA ADAPTER", "ISL37300P",
- 0x4b801a17, 0x6345a0bf, 0xc9049a39),
- /* D-Link DWL-650 Rev. P1; manfid 0x000b, 0x7110 */
- PCMCIA_DEVICE_PROD_ID123(
- "D-Link", "DWL-650 Wireless PC Card RevP", "ISL37101P-10",
- 0x1a424a1c, 0x6ea57632, 0xdd97a26b),
- PCMCIA_DEVICE_PROD_ID123(
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
@@ -834,14 +828,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
"Ver. 1.00",
0x5cd01705, 0x4271660f, 0x9d08ee12),
PCMCIA_DEVICE_PROD_ID123(
- "corega", "WL PCCL-11", "ISL37300P",
- 0xa21501a, 0x59868926, 0xc9049a39),
- PCMCIA_DEVICE_PROD_ID123(
- "The Linksys Group, Inc.", "Wireless Network CF Card", "ISL37300P",
- 0xa5f472c2, 0x9c05598d, 0xc9049a39),
- PCMCIA_DEVICE_PROD_ID123(
"Wireless LAN" , "11Mbps PC Card", "Version 01.02",
0x4b8870ff, 0x70e946d1, 0x4b74baa0),
+ PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
+ PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
+ PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
+ PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
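The hunk above collapses several full three-string matches into PCMCIA_DEVICE_PROD_ID3() entries, which match on the third CIS product-ID string (the Prism chipset name) and its hash alone, so a single line covers every card built around that chipset. A minimal sketch of the pattern, assuming a hypothetical example_cs driver and reusing the "ISL37300P" string/hash pair from the table above:

/* Sketch only: match PCMCIA cards by their third product-ID string. */
#include <linux/module.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static struct pcmcia_device_id example_cs_ids[] = {
	/* any card whose third CIS product string is "ISL37300P" */
	PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, example_cs_ids);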
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c882184..d70732819423 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
int events = 0;
u16 ev;
+ /* Detect early interrupt before driver is fully configured */
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ return IRQ_HANDLED;
+ }
+
iface = netdev_priv(dev);
local = iface->local;
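The added check handles an interrupt that fires before the driver has finished setting up the device (possible with shared interrupt lines) and rate-limits the diagnostic so a stuck line cannot flood the log. A generic sketch of the same guard, with a hypothetical foo_interrupt() handler:

/* Sketch: ignore interrupts that arrive before the device is configured. */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/net.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (!dev->base_addr) {
		if (net_ratelimit())	/* avoid log floods */
			printk(KERN_DEBUG "%s: interrupt before configuration\n",
			       dev->name);
		return IRQ_HANDLED;	/* mirror the patch: claim the interrupt */
	}

	/* normal interrupt handling would follow here */
	return IRQ_HANDLED;
}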
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 4dfb40a84c96..d737091cf6ac 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -2,6 +2,7 @@
#include <linux/if_arp.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 9419cebca8a5..9a082308a9d4 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,5 +1,6 @@
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f2..d24dc7dc0723 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -9,6 +9,7 @@
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
@@ -39,7 +40,7 @@ struct hostap_pci_priv {
/* FIX: do we need mb/wmb/rmb with memory operations? */
-static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a44..33e79037770b 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -12,6 +12,7 @@
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
@@ -60,7 +61,7 @@ struct hostap_plx_priv {
#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
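The hostap_pci and hostap_plx hunks above (and the ipw2100 one below) switch their PCI ID tables to the DEFINE_PCI_DEVICE_TABLE() macro, which supplies the struct pci_device_id type and section placement in one place instead of open-coding __devinitdata. A minimal sketch with made-up vendor/device IDs:

/* Sketch: a PCI ID table declared via DEFINE_PCI_DEVICE_TABLE(). */
#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_ids) = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);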
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f81..9b72c45a7748 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
-static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bedc..8d72e3d19586 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -31,6 +31,7 @@
******************************************************************************/
#include <linux/sched.h>
+#include <linux/slab.h>
#include "ipw2200.h"
@@ -3177,14 +3178,27 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
int total_nr = 0;
int i;
struct pci_pool *pool;
- u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
- dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
+ void **virts;
+ dma_addr_t *phys;
IPW_DEBUG_TRACE("<< : \n");
+ virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
+ GFP_KERNEL);
+ if (!virts)
+ return -ENOMEM;
+
+ phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
+ GFP_KERNEL);
+ if (!phys) {
+ kfree(virts);
+ return -ENOMEM;
+ }
pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
if (!pool) {
IPW_ERROR("pci_pool_create failed\n");
+ kfree(phys);
+ kfree(virts);
return -ENOMEM;
}
@@ -3254,6 +3268,8 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
pci_pool_free(pool, virts[i], phys[i]);
pci_pool_destroy(pool);
+ kfree(phys);
+ kfree(virts);
return ret;
}
@@ -11524,7 +11540,7 @@ out:
}
/* PCI driver stuff */
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
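The ipw_load_firmware() change above moves the virts[] and phys[] arrays, sized by CB_NUMBER_OF_ELEMENTS_SMALL, off the kernel stack onto the heap, and frees both on every exit path including the pci_pool_create() failure branch. A stripped-down sketch of the pattern, with illustrative names:

/* Sketch: replace large on-stack arrays with kmalloc()ed ones and
 * release them on every error path. */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

static int example_alloc_tables(unsigned int nelems,
				void ***virts_out, dma_addr_t **phys_out)
{
	void **virts;
	dma_addr_t *phys;

	virts = kmalloc(sizeof(void *) * nelems, GFP_KERNEL);
	if (!virts)
		return -ENOMEM;

	phys = kmalloc(sizeof(dma_addr_t) * nelems, GFP_KERNEL);
	if (!phys) {
		kfree(virts);	/* undo the first allocation */
		return -ENOMEM;
	}

	*virts_out = virts;
	*phys_out = phys;
	return 0;
}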
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391172f3..a6d5e42647e4 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -797,7 +797,7 @@ struct libipw_device {
/* Probe / Beacon management */
struct list_head network_free_list;
struct list_head network_list;
- struct libipw_network *networks;
+ struct libipw_network *networks[MAX_NETWORK_COUNT];
int scans;
int scan_age;
diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c
index 65e8c175a4a0..c9fe3c99cb00 100644
--- a/drivers/net/wireless/ipw2x00/libipw_geo.c
+++ b/drivers/net/wireless/ipw2x00/libipw_geo.c
@@ -34,7 +34,6 @@
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 1ae0b2b02c38..2fa55867bd8b 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -67,16 +67,17 @@ void *libipw_wiphy_privid = &libipw_wiphy_privid;
static int libipw_networks_allocate(struct libipw_device *ieee)
{
- if (ieee->networks)
- return 0;
-
- ieee->networks =
- kzalloc(MAX_NETWORK_COUNT * sizeof(struct libipw_network),
- GFP_KERNEL);
- if (!ieee->networks) {
- printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
- ieee->dev->name);
- return -ENOMEM;
+ int i, j;
+
+ for (i = 0; i < MAX_NETWORK_COUNT; i++) {
+ ieee->networks[i] = kzalloc(sizeof(struct libipw_network),
+ GFP_KERNEL);
+ if (!ieee->networks[i]) {
+ LIBIPW_ERROR("Out of memory allocating beacons\n");
+ for (j = 0; j < i; j++)
+ kfree(ieee->networks[j]);
+ return -ENOMEM;
+ }
}
return 0;
@@ -97,15 +98,11 @@ static inline void libipw_networks_free(struct libipw_device *ieee)
{
int i;
- if (!ieee->networks)
- return;
-
- for (i = 0; i < MAX_NETWORK_COUNT; i++)
- if (ieee->networks[i].ibss_dfs)
- kfree(ieee->networks[i].ibss_dfs);
-
- kfree(ieee->networks);
- ieee->networks = NULL;
+ for (i = 0; i < MAX_NETWORK_COUNT; i++) {
+ if (ieee->networks[i]->ibss_dfs)
+ kfree(ieee->networks[i]->ibss_dfs);
+ kfree(ieee->networks[i]);
+ }
}
void libipw_networks_age(struct libipw_device *ieee,
@@ -130,7 +127,7 @@ static void libipw_networks_initialize(struct libipw_device *ieee)
INIT_LIST_HEAD(&ieee->network_free_list);
INIT_LIST_HEAD(&ieee->network_list);
for (i = 0; i < MAX_NETWORK_COUNT; i++)
- list_add_tail(&ieee->networks[i].list,
+ list_add_tail(&ieee->networks[i]->list,
&ieee->network_free_list);
}
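The libipw_module.c hunks replace one large kzalloc() of MAX_NETWORK_COUNT networks with per-element allocations, unwinding the elements already allocated if a later one fails; libipw_networks_free() then frees them one by one. A generic sketch of that rollback loop (count, struct, and names are illustrative):

/* Sketch: allocate an array of objects element by element and unwind the
 * partial allocation on failure. */
#include <linux/slab.h>
#include <linux/errno.h>

#define EXAMPLE_COUNT 8			/* stands in for MAX_NETWORK_COUNT */

struct example_item { int payload; };

static struct example_item *example_items[EXAMPLE_COUNT];

static int example_items_allocate(void)
{
	int i, j;

	for (i = 0; i < EXAMPLE_COUNT; i++) {
		example_items[i] = kzalloc(sizeof(*example_items[i]), GFP_KERNEL);
		if (!example_items[i]) {
			for (j = 0; j < i; j++)	/* roll back what succeeded */
				kfree(example_items[j]);
			return -ENOMEM;
		}
	}
	return 0;
}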
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 282b1f7ff1e9..39a34da52d52 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
+#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
@@ -24,7 +25,6 @@
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
-#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 4d89f66f53b2..3633c6682e49 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -31,6 +31,7 @@
******************************************************************************/
#include <linux/kmod.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b16b06c2031f..dc8ed1527666 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,8 @@
config IWLWIFI
tristate "Intel Wireless Wifi"
- depends on PCI && MAC80211 && EXPERIMENTAL
+ depends on PCI && MAC80211
select FW_LOADER
-config IWLWIFI_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwlagn driver"
- depends on IWLWIFI
- ---help---
- This option will enable spectrum measurement for the iwlagn driver.
-
config IWLWIFI_DEBUG
bool "Enable full debugging output in iwlagn and iwl3945 drivers"
depends on IWLWIFI
@@ -120,9 +114,3 @@ config IWL3945
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwl3945.
-
-config IWL3945_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwl3945 driver"
- depends on IWL3945
- ---help---
- This option will enable spectrum measurement for the iwl3945 driver.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7f82044af242..4e378faee650 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -3,7 +3,6 @@ iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
iwlcore-objs += iwl-scan.o iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
CFLAGS_iwl-devtrace.o := -I$(src)
@@ -20,3 +19,5 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff4..3bf2e6e9b2d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,8 +89,78 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
+static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+ .min_nrg_cck = 95,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 90,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 120,
+ .auto_corr_min_ofdm_mrc_x1 = 240,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 155,
+ .auto_corr_max_ofdm_mrc_x1 = 290,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 170,
+ .auto_corr_max_cck_mrc = 400,
+ .nrg_th_cck = 95,
+ .nrg_th_ofdm = 95,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwl5000_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl1000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
static struct iwl_lib_ops iwl1000_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
+ .set_hw_params = iwl1000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
@@ -105,6 +175,8 @@ static struct iwl_lib_ops iwl1000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -138,9 +210,10 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl1000_ops = {
+static const struct iwl_ops iwl1000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
.hcmd = &iwl5000_hcmd,
@@ -173,7 +246,8 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl1000_bg_cfg = {
@@ -200,6 +274,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 08ce259a0e60..042f6bc0df13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 6fd10d443ba3..3a876a8ece38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index a871d09d598f..abe2b739c4dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 5a1033ca7aaa..ce990adc51e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index d4b49883b30e..902c4d4293e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7da1dab933d9..0728054a22d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -45,8 +46,8 @@
#include "iwl-sta.h"
#include "iwl-3945.h"
#include "iwl-eeprom.h"
-#include "iwl-helpers.h"
#include "iwl-core.h"
+#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
@@ -184,7 +185,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
{
int idx;
- for (idx = 0; idx < IWL_RATE_COUNT; idx++)
+ for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
if (iwl3945_rates[idx].plcp == plcp)
return idx;
return -1;
@@ -681,19 +682,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
snr = rx_stats_sig_avg / rx_stats_noise_diff;
rx_status.noise = rx_status.signal -
iwl3945_calc_db_from_ratio(snr);
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
- rx_status.noise);
-
- /* If noise info not available, calculate signal quality indicator (%)
- * using just the dBm signal level. */
} else {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
}
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_status.noise,
rx_stats_sig_avg, rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -811,7 +806,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
int sta_id, int tx_id)
{
u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
- u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1);
+ u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
u16 rate_mask;
int rate;
u8 rts_retry_limit;
@@ -1835,8 +1830,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
rc = -EIO;
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -1958,11 +1952,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
}
/* Add the broadcast address so we can send broadcast frames */
- if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
- IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
- return -EIO;
- }
+ priv->cfg->ops->lib->add_bcast_station(priv);
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -2157,7 +2147,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
/* fill in channel group's nominal powers for each rate */
for (rate_index = 0;
- rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) {
+ rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
switch (rate_index) {
case IWL_RATE_36M_INDEX_TABLE:
if (i == 0) /* B/G */
@@ -2481,11 +2471,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt =
- pci_alloc_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys);
-
+ priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->shared_phys, GFP_KERNEL);
if (!priv->shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
mutex_unlock(&priv->mutex);
@@ -2803,6 +2791,7 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
+ .add_bcast_station = iwl3945_add_bcast_station,
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2811,7 +2800,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
};
-static struct iwl_ops iwl3945_ops = {
+static const struct iwl_ops iwl3945_ops = {
.ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
@@ -2836,6 +2825,8 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,9 +2843,11 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
-struct pci_device_id iwl3945_hw_card_ids[] = {
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
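iwl3945_hw_set_hw_params() above switches the shared-memory allocation from the PCI-specific pci_alloc_consistent() to the generic DMA API, passing &pci_dev->dev and an explicit GFP_KERNEL. A minimal sketch of the allocate/free pair, with a hypothetical shared structure standing in for iwl3945_shared:

/* Sketch: coherent DMA memory through the generic DMA API. */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

struct example_shared { u32 words[16]; };	/* hypothetical shared area */

static struct example_shared *example_shared_alloc(struct pci_dev *pdev,
						   dma_addr_t *phys)
{
	return dma_alloc_coherent(&pdev->dev, sizeof(struct example_shared),
				  phys, GFP_KERNEL);
}

static void example_shared_free(struct pci_dev *pdev,
				struct example_shared *shared,
				dma_addr_t phys)
{
	dma_free_coherent(&pdev->dev, sizeof(struct example_shared),
			  shared, phys);
}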
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ecc23ec1f6a4..452dfd5456c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern struct pci_device_id iwl3945_hw_card_ids[];
+extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -171,24 +171,6 @@ struct iwl3945_frame {
#define SCAN_INTERVAL 100
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CONF_PENDING 18
-
#define MAX_TID_COUNT 9
#define IWL_INVALID_RATE 0xFF
@@ -222,12 +204,12 @@ struct iwl3945_ibss_seq {
*
*****************************************************************************/
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,int left);
-extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index c606366b582c..67ef562e8db1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 386513b601f5..8972166386cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -581,6 +581,13 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
@@ -1204,7 +1211,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
/* calculate tx gain adjustment based on power supply voltage */
- voltage = priv->calib_info->voltage;
+ voltage = le16_to_cpu(priv->calib_info->voltage);
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
voltage_compensation =
iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -1961,7 +1968,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int tid = MAX_TID_COUNT;
+ int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
@@ -2008,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ if (qc)
+ iwl_free_tfds_in_queue(priv, sta_id,
+ tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2035,13 +2044,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ else if (sta_id == IWL_INVALID_STATION)
+ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
-
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -2206,9 +2216,10 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl4965_ops = {
+static const struct iwl_ops iwl4965_ops = {
.ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
@@ -2239,7 +2250,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 4ef6804a455a..714e032f6217 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,11 +92,15 @@
static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_TEMPERATURE);
- /* offset = temperature - voltage / coef */
- s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
- return offset;
+ u16 temperature, voltage;
+ __le16 *temp_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
+
+ temperature = le16_to_cpu(temp_calib[0]);
+ voltage = le16_to_cpu(temp_calib[1]);
+
+ /* offset = temp - volt / coeff */
+ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
/* Fixed (non-configurable) rx data from phy */
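The iwl_temp_calib_to_offset() fix above keeps the EEPROM words typed as __le16 and converts them with le16_to_cpu() before the arithmetic; together with the -D__CHECK_ENDIAN__ flag added to the iwlwifi Makefile, sparse can then warn when such values are used unconverted. A small sketch of the annotation pattern, with a made-up coefficient:

/* Sketch: little-endian EEPROM words stay __le16 until explicitly
 * converted, so sparse can catch endianness mistakes. */
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_COEFF 5		/* hypothetical voltage/temperature coefficient */

static s32 example_calib_to_offset(const __le16 *calib)
{
	u16 temperature = le16_to_cpu(calib[0]);
	u16 voltage = le16_to_cpu(calib[1]);

	/* offset = temperature - voltage / coefficient */
	return (s32)(temperature - voltage / EXAMPLE_COEFF);
}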
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e2f8615c8c9b..e476acb53aa7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -179,14 +179,24 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
data->delta_gain_code[i] = 0;
continue;
}
- delta_g = (1000 * ((s32)average_noise[default_chain] -
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
+
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
- /* set negative sign */
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
data->delta_gain_code[i] |= (1 << 2);
}
@@ -263,8 +273,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 155,
- .auto_corr_max_ofdm_mrc_x1 = 290,
+ .auto_corr_max_ofdm_x1 = 120,
+ .auto_corr_max_ofdm_mrc_x1 = 240,
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
@@ -333,14 +343,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = (u8)xtal_calib[0];
- cmd.cap_pin2 = (u8)xtal_calib[1];
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
(u8 *)&cmd, sizeof(cmd));
}
@@ -411,12 +422,14 @@ static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
/*
* ucode
*/
-static int iwl5000_load_section(struct iwl_priv *priv,
- struct fw_desc *image,
- u32 dst_addr)
+static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
{
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -446,57 +459,36 @@ static int iwl5000_load_section(struct iwl_priv *priv,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
return ret;
}
if (!ret) {
- IWL_ERR(priv, "Could not load the INST uCode section\n");
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
return -ETIMEDOUT;
}
- priv->ucode_write_complete = 0;
-
- ret = iwl5000_load_section(
- priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
- if (ret)
- return ret;
+ return 0;
+}
- IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");
+static int iwl5000_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ ret = iwl5000_load_section(priv, "INST", inst_image,
+ IWL50_RTC_INST_LOWER_BOUND);
+ if (ret)
return ret;
- } else if (!ret) {
- IWL_ERR(priv, "Could not load the DATA uCode section\n");
- return -ETIMEDOUT;
- } else
- ret = 0;
- priv->ucode_write_complete = 0;
-
- return ret;
+ return iwl5000_load_section(priv, "DATA", data_image,
+ IWL50_RTC_DATA_LOWER_BOUND);
}
int iwl5000_load_ucode(struct iwl_priv *priv)
@@ -656,6 +648,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
int ac = iwl5000_default_queue_to_tx_fifo[i];
@@ -780,7 +779,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
@@ -799,12 +798,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
if (txq_id != IWL_CMD_QUEUE_NUM)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1124,7 +1123,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
scd_ssn , index, txq_id, txq->swq_id);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1152,16 +1151,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
@@ -1465,6 +1462,8 @@ struct iwl_lib_ops iwl5000_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1500,6 +1499,7 @@ struct iwl_lib_ops iwl5000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static struct iwl_lib_ops iwl5150_lib = {
@@ -1517,6 +1517,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1552,9 +1553,10 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl5000_ops = {
+static const struct iwl_ops iwl5000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
.hcmd = &iwl5000_hcmd,
@@ -1562,7 +1564,7 @@ static struct iwl_ops iwl5000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_ops iwl5150_ops = {
+static const struct iwl_ops iwl5150_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
.hcmd = &iwl5000_hcmd,
@@ -1597,8 +1599,10 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1621,7 +1625,10 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_abg_cfg = {
@@ -1644,6 +1651,8 @@ struct iwl_cfg iwl5100_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1666,8 +1675,10 @@ struct iwl_cfg iwl5100_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1690,8 +1701,10 @@ struct iwl_cfg iwl5350_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1714,8 +1727,10 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -1738,6 +1753,8 @@ struct iwl_cfg iwl5150_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
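The iwl-5000.c refactor above folds the duplicated INST/DATA wait-for-completion code into one iwl5000_load_section() that takes the section name and destination address, and clears ucode_write_complete before starting the transfer rather than after waiting. A generic sketch of that deduplication; example_fw_ctx, the addresses, and the omitted DMA kick-off are placeholders, not the iwlwifi API:

/* Sketch: one parameterized helper replaces two near-identical
 * load-and-wait sequences. */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_fw_ctx {
	wait_queue_head_t wait;
	int write_complete;	/* set by the (not shown) completion interrupt */
};

static int example_load_section(struct example_fw_ctx *ctx, const char *name,
				const void *image, size_t len, u32 dst_addr)
{
	int ret;

	ctx->write_complete = 0;
	/* start the DMA transfer of 'image' (len bytes) to dst_addr here */

	ret = wait_event_interruptible_timeout(ctx->wait,
					       ctx->write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted while loading 'name' */
	if (!ret)
		return -ETIMEDOUT;	/* the 'name' section timed out */
	return 0;
}

static int example_load_ucode(struct example_fw_ctx *ctx,
			      const void *inst, size_t inst_len,
			      const void *data, size_t data_len)
{
	int ret;

	ret = example_load_section(ctx, "INST", inst, inst_len, 0x00000000);
	if (ret)
		return ret;
	return example_load_section(ctx, "DATA", data, data_len, 0x00800000);
}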
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 90185777d98b..ddba39999997 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e571049273..92b3e64fc14d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -70,6 +70,14 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
+/* Indicate calibration version to uCode. */
+static void iwl6050_set_calib_version(struct iwl_priv *priv)
+{
+ if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
+ iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+}
+
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
@@ -96,6 +104,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* else do nothing, uCode configured */
+ if (priv->cfg->ops->lib->temp_ops.set_calib_version)
+ priv->cfg->ops->lib->temp_ops.set_calib_version(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -108,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_ofdm = 145,
.auto_corr_max_ofdm_mrc = 232,
- .auto_corr_max_ofdm_x1 = 145,
+ .auto_corr_max_ofdm_x1 = 110,
.auto_corr_max_ofdm_mrc_x1 = 232,
.auto_corr_min_cck = 125,
@@ -158,11 +168,25 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_6x50:
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ break;
+ default:
+ priv->hw_params.calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ break;
+ }
+
return 0;
}
@@ -215,6 +239,8 @@ static struct iwl_lib_ops iwl6000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -233,7 +259,7 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
@@ -250,9 +276,10 @@ static struct iwl_lib_ops iwl6000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl6000_ops = {
+static const struct iwl_ops iwl6000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
@@ -260,18 +287,68 @@ static struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
+static struct iwl_lib_ops iwl6050_lib = {
+ .set_hw_params = iwl6000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwl5000_txq_set_sched,
+ .txq_agg_enable = iwl5000_txq_agg_enable,
+ .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwl5000_rx_handler_setup,
+ .setup_deferred_work = iwl5000_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .load_ucode = iwl5000_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
+ .init_alive_start = iwl5000_init_alive_start,
+ .alive_notify = iwl5000_alive_notify,
+ .send_tx_power = iwl5000_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .set_channel_switch = iwl6000_hw_channel_switch,
+ .apm_ops = {
+ .init = iwl_apm_init,
+ .stop = iwl_apm_stop,
+ .config = iwl6000_nic_config,
+ .set_pwr_src = iwl_set_pwr_src,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_5000_REG_BAND_1_CHANNELS,
+ EEPROM_5000_REG_BAND_2_CHANNELS,
+ EEPROM_5000_REG_BAND_3_CHANNELS,
+ EEPROM_5000_REG_BAND_4_CHANNELS,
+ EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ },
+ .verify_signature = iwlcore_eeprom_verify_signature,
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwl5000_eeprom_calib_version,
+ .query_addr = iwl5000_eeprom_query_addr,
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .post_associate = iwl_post_associate,
+ .isr = iwl_isr_ict,
+ .config_ap = iwl_config_ap,
+ .temp_ops = {
+ .temperature = iwl5000_temperature,
+ .set_ct_kill = iwl6000_set_ct_threshold,
+ .set_calib_version = iwl6050_set_calib_version,
+ },
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl6050_ops = {
+static const struct iwl_ops iwl6050_ops = {
.ucode = &iwl5000_ucode,
- .lib = &iwl6000_lib,
+ .lib = &iwl6050_lib,
.hcmd = &iwl5000_hcmd,
- .utils = &iwl6050_hcmd_utils,
+ .utils = &iwl5000_hcmd_utils,
.led = &iwlagn_led_ops,
};
@@ -306,7 +383,8 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -336,6 +414,8 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,6 +445,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@@ -395,7 +477,8 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -425,6 +508,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -455,7 +540,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
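iwl6000_hw_set_hw_params() above now picks the initial calibration set per hardware revision, adding IWL_CALIB_DC only for 6x50 parts, with the set expressed as a BIT() mask. A tiny sketch of building such a per-revision bitmask, using illustrative flag names rather than the driver's enum:

/* Sketch: choose a calibration bitmask based on the hardware flavour. */
#include <linux/bitops.h>
#include <linux/types.h>

enum example_calib {
	EX_CALIB_XTAL,
	EX_CALIB_DC,
	EX_CALIB_LO,
	EX_CALIB_TX_IQ,
	EX_CALIB_BASE_BAND,
};

static u32 example_calib_init_cfg(bool is_6x50)
{
	u32 cfg = BIT(EX_CALIB_XTAL) | BIT(EX_CALIB_LO) |
		  BIT(EX_CALIB_TX_IQ) | BIT(EX_CALIB_BASE_BAND);

	if (is_6x50)		/* 6x50 parts also run the DC calibration */
		cfg |= BIT(EX_CALIB_DC);
	return cfg;
}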
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 3bccba20f6da..1a24946bc203 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index ab55f92a161d..a594e4fdc6b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index fe511cbf012e..1460116d329f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
@@ -150,7 +151,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
};
/* mbps, mcs */
-const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
{ "2", "QPSK DSSS"},
{"5.5", "BPSK CCK"},
@@ -298,10 +299,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
+ int ret;
+
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
- ieee80211_start_tx_ba_session(sta, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync
+ * this might be caused by reloading firmware
+ * stop the tx ba session here
+ */
+ IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ret = ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_INITIATOR);
+ }
}
}
@@ -332,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
!!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
/**
* rs_collect_tx_data - Update the success/failure sliding window
*
@@ -339,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
- int scale_index, s32 tpt, int attempts,
- int successes)
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
{
struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count;
+ s32 fail_count, tpt;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
/* Select window for current tx bit rate */
- window = &(windows[scale_index]);
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = get_expected_tpt(tbl, scale_index);
/*
* Keep track of only the latest 62 tx frame attempts in this rate's
@@ -725,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
(a->is_SGI == b->is_SGI);
}
-/*
- * Static function to get the expected throughput from an iwl_scale_tbl_info
- * that wraps a NULL pointer check
- */
-static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
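For reference, a minimal standalone C sketch of the sliding-window bookkeeping that rs_collect_tx_data maintains (a 62-entry bitmask of ACK outcomes per rate). The struct and function names here are illustrative, not the driver's, and the aging detail is simplified:

#include <stdint.h>
#include <stdio.h>

#define RATE_MAX_WINDOW 62            /* latest N tx attempts tracked */

struct tx_window {                    /* illustrative stand-in for iwl_rate_scale_data */
	uint64_t data;                /* bitmask of successful frames, newest in bit 0 */
	int counter;                  /* number of frames counted (<= RATE_MAX_WINDOW) */
	int success_counter;          /* number of successes currently in the window */
};

/* Push one tx attempt into the window, aging out the oldest bit. */
static void window_add(struct tx_window *w, int success)
{
	const uint64_t oldest = 1ULL << (RATE_MAX_WINDOW - 1);

	if (w->counter >= RATE_MAX_WINDOW) {
		if (w->data & oldest)         /* oldest frame falls out */
			w->success_counter--;
		w->counter--;
	}
	w->data = (w->data << 1) & ~(~0ULL << RATE_MAX_WINDOW);
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
	w->counter++;
}

int main(void)
{
	struct tx_window w = { 0 };
	int i;

	for (i = 0; i < 100; i++)
		window_add(&w, i % 3 != 0);   /* pretend 2 of 3 frames are ACKed */
	printf("success %d / %d in window\n", w.success_counter, w.counter);
	return 0;
}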
/*
* mac80211 sends us Tx status
@@ -751,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_priv *priv = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_rate_scale_data *window = NULL;
enum mac80211_rate_control_flags mac_flags;
u32 tx_rate;
struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
- s32 tpt = 0;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -839,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
return;
}
- window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
/*
* Updating the frame history depends on whether packets were
@@ -852,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
- tpt = get_expected_tpt(curr_tbl, rs_index);
- rs_collect_tx_data(window, rs_index, tpt,
+ rs_collect_tx_data(curr_tbl, rs_index,
info->status.ampdu_ack_len,
info->status.ampdu_ack_map);
@@ -883,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* table as active/search.
*/
if (table_type_matches(&tbl_type, curr_tbl))
- tpt = get_expected_tpt(curr_tbl, rs_index);
+ tmp_tbl = curr_tbl;
else if (table_type_matches(&tbl_type, other_tbl))
- tpt = get_expected_tpt(other_tbl, rs_index);
+ tmp_tbl = other_tbl;
else
continue;
-
- /* Constants mean 1 transmission, 0 successes */
- if (i < retries)
- rs_collect_tx_data(window, rs_index, tpt, 1,
- 0);
- else
- rs_collect_tx_data(window, rs_index, tpt, 1,
- legacy_success);
+ rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index affc0c5a2f2c..e71923961e69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -191,7 +191,7 @@ enum {
IWL_RATE_2M_MASK)
#define IWL_CCK_RATES_MASK \
- (IWL_BASIC_RATES_MASK | \
+ (IWL_CCK_BASIC_RATES_MASK | \
IWL_RATE_5M_MASK | \
IWL_RATE_11M_MASK)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8377efb3ba7..bdff56583e11 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
@@ -73,13 +74,7 @@
#define VD
#endif
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
+#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -203,7 +198,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
priv->start_calib = 0;
/* Add the broadcast address so we can send broadcast frames */
- iwl_add_bcast_station(priv);
+ priv->cfg->ops->lib->add_bcast_station(priv);
+
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -657,6 +653,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
iwl_send_statistics_request(priv, CMD_ASYNC, false);
}
+
+static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_read_targ_mem(priv, base);
+ num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl_bg_ucode_trace - Timer callback to log uCode events
+ *
+ * The timer is continually rearmed to fire UCODE_TRACE_PERIOD
+ * milliseconds after it last expired; this function performs the
+ * continuous uCode event logging, if enabled.
+ */
+static void iwl_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
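The wrap handling above boils down to index arithmetic on a circular log. A simplified standalone sketch of the ranges that iwl_continuous_event_trace hands to the dump helper (it ignores the multiple-wrap case the driver also counts; names are hypothetical):

#include <stdio.h>

static void dump_new_entries(unsigned int capacity, unsigned int prev,
			     unsigned int next, int wrapped)
{
	if (!wrapped) {
		/* no wrap since the last poll: one contiguous range */
		printf("dump [%u, %u)\n", prev, next);
	} else {
		/* wrapped: tail of the buffer first, then the head */
		printf("dump [%u, %u)\n", prev, capacity);
		printf("dump [%u, %u)\n", 0u, next);
	}
}

int main(void)
{
	dump_new_entries(512, 100, 180, 0);   /* steady state */
	dump_new_entries(512, 480, 35, 1);    /* uCode wrapped past the end */
	return 0;
}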
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -689,12 +810,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- RF_CARD_DISABLED)) {
+ CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +831,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
- if (flags & RF_CARD_DISABLED)
+ if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
- if (!(flags & RF_CARD_DISABLED))
+ if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
@@ -761,6 +884,8 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -774,7 +899,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
@@ -1135,7 +1259,15 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
*/
- iwl_write32(priv, CSR_INT, priv->inta);
+ /* There is a hardware bug in the interrupt mask function: some
+ * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
+ * they are disabled in the CSR_INT_MASK register. Furthermore, the
+ * ICT interrupt handling mechanism has another bug that can cause
+ * these unmasked interrupts to go undetected. We work around the
+ * hardware bugs here by ACKing all the possible interrupts so that
+ * interrupt coalescing can still be achieved.
+ */
+ iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
inta = priv->inta;
@@ -1340,59 +1472,66 @@ static void iwl_nic_start(struct iwl_priv *priv)
}
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static int iwl_mac_setup_register(struct iwl_priv *priv);
+
+static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
+{
+ const char *name_pre = priv->cfg->fw_name_pre;
+
+ if (first)
+ priv->fw_index = priv->cfg->ucode_api_max;
+ else
+ priv->fw_index--;
+
+ if (priv->fw_index < priv->cfg->ucode_api_min) {
+ IWL_ERR(priv, "no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(priv->firmware_name, "%s%d%s",
+ name_pre, priv->fw_index, ".ucode");
+
+ IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
+ priv->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
+ &priv->pci_dev->dev, GFP_KERNEL, priv,
+ iwl_ucode_callback);
+}
+
/**
- * iwl_read_ucode - Read uCode images from disk file.
+ * iwl_ucode_callback - callback when firmware was loaded
*
- * Copy into buffers for card to fetch via bus-mastering
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
*/
-static int iwl_read_ucode(struct iwl_priv *priv)
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
{
+ struct iwl_priv *priv = context;
struct iwl_ucode_header *ucode;
- int ret = -EINVAL, index;
- const struct firmware *ucode_raw;
- const char *name_pre = priv->cfg->fw_name_pre;
const unsigned int api_max = priv->cfg->ucode_api_max;
const unsigned int api_min = priv->cfg->ucode_api_min;
- char buf[25];
u8 *src;
size_t len;
u32 api_ver, build;
u32 inst_size, data_size, init_size, init_data_size, boot_size;
+ int err;
u16 eeprom_ver;
- /* Ask kernel firmware_class module to get the boot firmware off disk.
- * request_firmware() is synchronous, file is in memory on return. */
- for (index = api_max; index >= api_min; index--) {
- sprintf(buf, "%s%d%s", name_pre, index, ".ucode");
- ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
- if (ret < 0) {
- IWL_ERR(priv, "%s firmware file req failed: %d\n",
- buf, ret);
- if (ret == -ENOENT)
- continue;
- else
- goto error;
- } else {
- if (index < api_max)
- IWL_ERR(priv, "Loaded firmware %s, "
- "which is deprecated. "
- "Please use API v%u instead.\n",
- buf, api_max);
-
- IWL_DEBUG_INFO(priv, "Got firmware '%s' file (%zd bytes) from disk\n",
- buf, ucode_raw->size);
- break;
- }
+ if (!ucode_raw) {
+ IWL_ERR(priv, "request for firmware file '%s' failed.\n",
+ priv->firmware_name);
+ goto try_again;
}
- if (ret < 0)
- goto error;
+ IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
+ priv->firmware_name, ucode_raw->size);
/* Make sure that we got at least the v1 header! */
if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
IWL_ERR(priv, "File size way too small!\n");
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
/* Data from ucode file: header followed by uCode images */
@@ -1417,10 +1556,9 @@ static int iwl_read_ucode(struct iwl_priv *priv)
IWL_ERR(priv, "Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
api_max, api_ver);
- priv->ucode_ver = 0;
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
+
if (api_ver != api_max)
IWL_ERR(priv, "Firmware has old API version. Expected v%u, "
"got v%u. New firmware can be obtained "
@@ -1462,6 +1600,12 @@ static int iwl_read_ucode(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
boot_size);
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
/* Verify size of file vs. image size info in file's header */
if (ucode_raw->size !=
priv->cfg->ops->ucode->get_header_size(api_ver) +
@@ -1471,41 +1615,35 @@ static int iwl_read_ucode(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv,
"uCode file size %d does not match expected size\n",
(int)ucode_raw->size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
/* Verify that uCode images will fit in card's SRAM */
if (inst_size > priv->hw_params.max_inst_size) {
IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
inst_size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
if (data_size > priv->hw_params.max_data_size) {
IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
data_size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
if (init_size > priv->hw_params.max_inst_size) {
IWL_INFO(priv, "uCode init instr len %d too large to fit in\n",
init_size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
if (init_data_size > priv->hw_params.max_data_size) {
IWL_INFO(priv, "uCode init data len %d too large to fit in\n",
init_data_size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
if (boot_size > priv->hw_params.max_bsm_size) {
IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n",
boot_size);
- ret = -EINVAL;
- goto err_release;
+ goto try_again;
}
/* Allocate ucode buffers for card's bus-master loading ... */
@@ -1589,20 +1727,36 @@ static int iwl_read_ucode(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len);
memcpy(priv->ucode_boot.v_addr, src, len);
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl_mac_setup_register(priv);
+ if (err)
+ goto out_unbind;
+
+ err = iwl_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
+
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
- return 0;
+ return;
+
+ try_again:
+ /* try next, if any */
+ if (iwl_request_firmware(priv, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
err_pci_alloc:
IWL_ERR(priv, "failed to allocate pci memory\n");
- ret = -ENOMEM;
iwl_dealloc_ucode_pci(priv);
-
- err_release:
+ out_unbind:
+ device_release_driver(&priv->pci_dev->dev);
release_firmware(ucode_raw);
-
- error:
- return ret;
}
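The hunks above turn firmware loading into an asynchronous request/callback chain, but the selection policy is unchanged: build names from fw_name_pre plus an API index and walk from ucode_api_max down to ucode_api_min. A compact userspace sketch of that naming and fallback policy, where fw_exists() is a hypothetical stand-in for a successful firmware request:

#include <stdio.h>
#include <string.h>

static int fw_exists(const char *name)
{
	/* hypothetical: stands in for request_firmware() succeeding */
	return strcmp(name, "iwlwifi-6000-4.ucode") == 0;
}

static int pick_firmware(const char *name_pre, int api_max, int api_min,
			 char *out, size_t outlen)
{
	int idx;

	for (idx = api_max; idx >= api_min; idx--) {
		snprintf(out, outlen, "%s%d.ucode", name_pre, idx);
		if (fw_exists(out))
			return idx;            /* highest API the user has */
	}
	return -1;                             /* no suitable firmware found */
}

int main(void)
{
	char name[64];
	int api = pick_firmware("iwlwifi-6000-", 5, 4, name, sizeof(name));

	if (api < 0)
		printf("no suitable firmware found\n");
	else
		printf("loading %s (API v%d)\n", name, api);
	return 0;
}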
static const char *desc_lookup_text[] = {
@@ -1634,7 +1788,7 @@ static const char *desc_lookup_text[] = {
"DEBUG_1",
"DEBUG_2",
"DEBUG_3",
- "UNKNOWN"
+ "ADVANCED SYSASSERT"
};
static const char *desc_lookup(int i)
@@ -1705,8 +1859,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
* iwl_print_event_log - Dump error event log to syslog
*
*/
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1871,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
else
@@ -1744,27 +1899,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
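iwl_print_event_log can now either log each entry or append it to a caller-supplied buffer, threading a running position through repeated calls. A minimal sketch of that append pattern using plain snprintf (scnprintf differs only in clamping the return value to what was actually written):

#include <stdio.h>

/* Append one formatted entry into buf, returning the updated position. */
static int append_entry(char *buf, size_t bufsz, int pos,
			unsigned int time, unsigned int ev)
{
	int n;

	if (pos < 0 || (size_t)pos >= bufsz)
		return pos;                     /* buffer already full */
	n = snprintf(buf + pos, bufsz - pos, "EVT_LOG:0x%08x:%04u\n", time, ev);
	if (n < 0)
		return pos;
	if ((size_t)n > bufsz - pos - 1)        /* truncated: clamp like scnprintf */
		n = bufsz - pos - 1;
	return pos + n;
}

int main(void)
{
	char buf[256];
	int pos = 0;

	pos = append_entry(buf, sizeof(buf), pos, 0x1234, 7);
	pos = append_entry(buf, sizeof(buf), pos, 0x5678, 8);
	fputs(buf, stdout);
	return 0;
}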
/**
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1944,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
} else {
- if (next_entry < size)
- iwl_print_event_log(priv, 0, next_entry, mode);
- else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ if (next_entry < size) {
+ pos = iwl_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1971,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1980,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1992,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1838,11 +2018,11 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -1853,6 +2033,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
@@ -1860,17 +2049,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
} else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
+ return pos;
}
/**
@@ -2276,18 +2470,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
return;
}
-static void iwl_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2304,7 +2486,13 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -2440,7 +2628,7 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_setup_mac(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2456,12 +2644,16 @@ static int iwl_setup_mac(struct iwl_priv *priv)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS;
/*
@@ -2506,21 +2698,7 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
/* we should be verifying the device is ready to be opened */
mutex_lock(&priv->mutex);
-
- /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
- * ucode filename and max sizes are card-specific. */
-
- if (!priv->ucode_code.len) {
- ret = iwl_read_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Could not read microcode: %d\n", ret);
- mutex_unlock(&priv->mutex);
- return ret;
- }
- }
-
ret = __iwl_up(priv);
-
mutex_unlock(&priv->mutex);
if (ret)
@@ -2668,14 +2846,18 @@ void iwl_config_ap(struct iwl_priv *priv)
}
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
+ iwl_update_tkip_key(priv, keyconf,
+ sta ? sta->addr : iwl_bcast_addr,
+ iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -2784,6 +2966,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
else
return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* do nothing */
+ return -EOPNOTSUPP;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -2833,6 +3018,8 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
break;
case STA_NOTIFY_AWAKE:
WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
sta_priv->asleep = false;
sta_id = iwl_find_station(priv, sta->addr);
if (sta_id != IWL_INVALID_STATION)
@@ -3109,7 +3296,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl_bg_up);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3126,6 +3312,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl_bg_statistics_periodic;
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl_bg_ucode_trace;
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3141,9 +3331,11 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
+ cancel_work_sync(&priv->start_internal_scan);
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
}
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3173,13 +3365,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
INIT_LIST_HEAD(&priv->free_frames);
mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
/* Clear the driver's (not device's) station table */
iwl_clear_stations_table(priv);
@@ -3189,6 +3381,14 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ priv->force_reset[IWL_RF_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_RF_RESET;
+ priv->force_reset[IWL_FW_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3265,7 +3465,6 @@ static struct ieee80211_ops iwl_hw_ops = {
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.get_stats = iwl_mac_get_stats,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3361,10 +3560,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(unsigned long long) pci_resource_len(pdev, 0));
IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
@@ -3439,9 +3647,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
- /**********************************
- * 8. Setup and register mac80211
- **********************************/
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
/* enable interrupts if needed: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
@@ -3452,14 +3660,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_enable_interrupts(priv);
- err = iwl_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -3471,6 +3671,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+
+ err = iwl_request_firmware(priv, true);
+ if (err)
+ goto out_remove_sysfs;
+
return 0;
out_remove_sysfs:
@@ -3589,7 +3794,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static struct pci_device_id iwl_hw_card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7ea..8b516c5ff0bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-dev.h"
@@ -414,7 +415,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
static int iwl_sensitivity_write(struct iwl_priv *priv)
{
- int ret = 0;
struct iwl_sensitivity_cmd cmd ;
struct iwl_sensitivity_data *data = NULL;
struct iwl_host_cmd cmd_out = {
@@ -477,11 +477,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- ret = iwl_send_cmd(priv, &cmd_out);
- if (ret)
- IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
-
- return ret;
+ return iwl_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -812,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
}
}
+ /*
+ * The above algorithm sometimes fails when the ucode
+ * reports 0 for all chains. It's not clear why that
+ * happens to start with, but it is then causing trouble
+ * because this can make us enable more chains than the
+ * hardware really has.
+ *
+ * To be safe, simply mask out any chains that we know
+ * are not on the device.
+ */
+ active_chains &= priv->hw_params.valid_rx_ant;
+
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index b6cef989a796..2b7b1df83ba0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e91507531923..6383d9f8c9b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,7 +120,6 @@ enum {
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
/* 802.11h related */
- RADAR_NOTIFICATION = 0x70, /* not used */
REPLY_QUIET_CMD = 0x71, /* not used */
REPLY_CHANNEL_SWITCH = 0x72,
CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2248,10 +2247,22 @@ struct iwl_link_quality_cmd {
__le32 reserved2;
} __attribute__ ((packed));
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ * bit 2 - 1: BT 2 wire support enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+#define BT_ENABLE_2_WIRE BIT(2)
+
#define BT_COEX_DISABLE (0x0)
-#define BT_COEX_MODE_2W (0x1)
-#define BT_COEX_MODE_3W (0x2)
-#define BT_COEX_MODE_4W (0x3)
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
#define BT_LEAD_TIME_MIN (0x0)
#define BT_LEAD_TIME_DEF (0x1E)
@@ -2510,7 +2521,7 @@ struct iwl_card_state_notif {
#define HW_CARD_DISABLED 0x01
#define SW_CARD_DISABLED 0x02
-#define RF_CARD_DISABLED 0x04
+#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
struct iwl_ct_kill_config {
@@ -2612,6 +2623,7 @@ struct iwl_ssid_ie {
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH cpu_to_le16(1)
#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
#define IWL_MAX_PROBE_REQUEST 200
/*
@@ -2984,7 +2996,7 @@ struct statistics_rx_ht_phy {
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
- __le32 reserved2;
+ __le32 unsupport_mcs;
} __attribute__ ((packed));
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3099,8 @@ struct statistics_div {
} __attribute__ ((packed));
struct statistics_general {
- __le32 temperature;
- __le32 temperature_m;
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* for 5000 and up, this is radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3096,7 +3108,12 @@ struct statistics_general {
__le32 ttl_timestamp;
struct statistics_div div;
__le32 rx_enable_counter;
- __le32 reserved1;
+ /*
+ * num_of_sos_states:
+ * counts the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
__le32 reserved2;
__le32 reserved3;
} __attribute__ ((packed));
@@ -3161,13 +3178,30 @@ struct iwl_notif_statistics {
/*
* MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends a MISSED_BEACONS_NOTIFICATION to the driver whenever it
+ * detects missed beacons, regardless of how many were missed. When the
+ * driver receives the notification it can find, inside the command, all
+ * of the beacon information: the total number of missed beacons, the
+ * number of consecutive missed beacons, the number of beacons received
+ * and the number of beacons expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this has no relation
+ * to when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when
+ * to perform sensitivity calibration based on the number of consecutive
+ * missed beacons, in order to improve overall performance, especially
+ * in noisy environments.
+ *
*/
-/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
- * then this notification will be sent. */
-#define CONSECUTIVE_MISSED_BCONS_TH 20
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
struct iwl_missed_beacon_notif {
- __le32 consequtive_missed_beacons;
+ __le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
@@ -3437,11 +3471,7 @@ enum {
IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
- IWL_PHY_CALIBRATE_RX_BB_CMD = 10,
IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
- IWL_PHY_CALIBRATE_RX_IQ_CMD = 12,
- IWL_PHY_CALIBRATION_NOISE_CMD = 13,
- IWL_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 574d36658702..049b652bcb5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
@@ -47,6 +48,26 @@ MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
+/*
+ * When bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * When bt_coex_active is false, uCode will ignore BT activity and perform
+ * normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to the
+ * WiFi/BT coexistence problem; the typical symptom is being able to scan
+ * and find all available APs but not being able to associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
+
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
0, COEX_UNASSOC_IDLE_FLAGS},
@@ -257,8 +278,8 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
- /* Set interrupt coalescing timer to 512 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -287,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
- /* Allocate and init all Tx and Command queues */
- ret = iwl_txq_ctx_reset(priv);
- if (ret)
- return ret;
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwl_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwl_txq_ctx_reset(priv);
set_bit(STATUS_INIT, &priv->status);
@@ -450,8 +474,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
if (priv->cfg->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
- (priv->cfg->sm_ps_mode << 2));
max_bit_rate = MAX_BIT_RATE_20_MHZ;
if (priv->hw_params.ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +658,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
static bool is_single_rx_stream(struct iwl_priv *priv)
{
- return !priv->current_ht_config.is_ht ||
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
priv->current_ht_config.single_chain_sufficient;
}
@@ -1003,28 +1025,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
*/
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
- int idle_cnt = active_cnt;
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-
- /* # Rx chains when idling and maybe trying to save power */
- switch (priv->cfg->sm_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
- IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- case WLAN_HT_CAP_SM_PS_INVALID:
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
default:
- IWL_ERR(priv, "invalid sm_ps mode %u\n",
- priv->cfg->sm_ps_mode);
- WARN_ON(1);
- break;
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
}
- return idle_cnt;
}
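The rewritten iwl_get_idle_rx_chain_count keys the idle chain count directly off the mac80211 SMPS mode. A standalone sketch of the same mapping, with an illustrative enum rather than mac80211's:

#include <stdio.h>

enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };  /* illustrative only */

#define NUM_IDLE_CHAINS_SINGLE 1

static int idle_rx_chain_count(enum smps_mode smps, int active_cnt)
{
	switch (smps) {
	case SMPS_STATIC:
	case SMPS_DYNAMIC:
		return NUM_IDLE_CHAINS_SINGLE;  /* power save: one chain while idle */
	case SMPS_OFF:
	default:
		return active_cnt;              /* no SM power save: keep them all */
	}
}

int main(void)
{
	printf("static: %d\n", idle_rx_chain_count(SMPS_STATIC, 3));
	printf("off:    %d\n", idle_rx_chain_count(SMPS_OFF, 3));
	return 0;
}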
/* up to 4 chains */
@@ -1363,7 +1375,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
priv->cfg->ops->lib->dump_nic_error_log(priv);
- priv->cfg->ops->lib->dump_nic_event_log(priv, false);
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
iwl_print_rx_config_cmd(priv);
@@ -1658,9 +1674,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
void iwl_free_isr_ict(struct iwl_priv *priv)
{
if (priv->ict_tbl_vir) {
- pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
- PAGE_SIZE, priv->ict_tbl_vir,
- priv->ict_tbl_dma);
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->ict_tbl_vir, priv->ict_tbl_dma);
priv->ict_tbl_vir = NULL;
}
}
@@ -1676,9 +1692,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
if (priv->cfg->use_isr_legacy)
return 0;
/* allocate shared data table */
- priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
- ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma);
+ priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->ict_tbl_dma, GFP_KERNEL);
if (!priv->ict_tbl_vir)
return -ENOMEM;
@@ -1813,6 +1829,16 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (val == 0xffffffff)
val = 0;
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
inta, inta_mask, val);
@@ -1975,13 +2001,20 @@ EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
- .flags = BT_COEX_MODE_4W,
.lead_time = BT_LEAD_TIME_DEF,
.max_kill = BT_MAX_KILL_DEF,
.kill_ack_mask = 0,
.kill_cts_mask = 0,
};
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
sizeof(struct iwl_bt_cmd), &bt_cmd);
}
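BT_COEX_ENABLE is just the OR of the announce and priority bits, and iwl_send_bt_config selects between it and BT_COEX_DISABLE from the new module parameter. A tiny standalone illustration of that flag composition, with BIT() defined locally:

#include <stdio.h>

#define BIT(n)                      (1U << (n))
#define BT_COEX_DISABLE             0x0
#define BT_ENABLE_CHANNEL_ANNOUNCE  BIT(0)
#define BT_ENABLE_PRIORITY          BIT(1)
#define BT_ENABLE_2_WIRE            BIT(2)
#define BT_COEX_ENABLE              (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)

int main(void)
{
	int bt_coex_active = 1;         /* mirrors the new module parameter */
	unsigned int flags = bt_coex_active ? BT_COEX_ENABLE : BT_COEX_DISABLE;

	printf("BT coex %s (flags 0x%x)\n",
	       flags == BT_COEX_DISABLE ? "disable" : "active", flags);
	return 0;
}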
@@ -2344,6 +2377,21 @@ static void iwl_ht_conf(struct iwl_priv *priv,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+{
+ priv->assoc_id = 0;
+ iwl_led_disassociate(priv);
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ priv->staging_rxon.assoc_id = 0;
+ iwlcore_commit_rxon(priv);
+}
+
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2475,20 +2523,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
priv->cfg->ops->lib->post_associate(priv);
- } else {
- priv->assoc_id = 0;
- iwl_led_disassociate(priv);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * send
- */
- priv->staging_rxon.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = 0;
- iwlcore_commit_rxon(priv);
- }
+ } else
+ iwl_set_no_assoc(priv);
}
if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2503,12 +2539,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
- vif->bss_conf.enable_beacon) {
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ iwlcore_config_ap(priv);
+ } else
+ iwl_set_no_assoc(priv);
}
mutex_unlock(&priv->mutex);
@@ -2594,44 +2632,43 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
EXPORT_SYMBOL(iwl_set_mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
- unsigned long flags;
+ int err = 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
+ mutex_lock(&priv->mutex);
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
-
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->vif = vif;
+ priv->iw_mode = vif->type;
- mutex_lock(&priv->mutex);
-
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (vif->addr) {
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
}
- if (iwl_set_mode(priv, conf->type) == -EAGAIN)
+ if (iwl_set_mode(priv, vif->type) == -EAGAIN)
/* we are not ready, will run again when ready */
set_bit(STATUS_MODE_PENDING, &priv->status);
+ out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return err;
}
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2644,7 +2681,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
}
- if (priv->vif == conf->vif) {
+ if (priv->vif == vif) {
priv->vif = NULL;
memset(priv->bssid, 0, ETH_ALEN);
}
@@ -2684,6 +2721,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
}
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ }
/* during scanning mac80211 will delay channel setting until
* scan finish with changed = 0
@@ -2740,6 +2792,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
priv->staging_rxon.flags = 0;
iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, conf->channel->band);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2780,10 +2833,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2806,42 +2855,6 @@ out:
}
EXPORT_SYMBOL(iwl_mac_config);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_mac_get_tx_stats);
-
void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -3191,6 +3204,206 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_update_stats);
#endif
+static const char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_dump_csr(struct iwl_priv *priv)
+{
+ int i;
+ u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(priv, "CSR values:\n");
+ IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(priv, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(priv, csr_tbl[i]));
+ }
+}
+EXPORT_SYMBOL(iwl_dump_csr);
+
+static const char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_dump_fh);
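iwl_dump_fh() above follows a dual-path dump pattern: with display set it formats the register table into a heap buffer handed back to the caller (the debugfs path, caller frees), otherwise it prints each entry to the log. A userspace sketch of the same pattern, under the assumption that read_reg() stands in for iwl_read_direct32() and plain snprintf() stands in for the kernel's scnprintf():

	/* Userspace sketch (not driver code) of the buffer-or-log dump path. */
	#include <stdio.h>
	#include <stdlib.h>

	static unsigned read_reg(unsigned addr) { return addr * 3u; } /* fake */

	static int dump_regs(char **buf, int display)
	{
		static const unsigned tbl[] = { 0x10, 0x14, 0x18 };
		size_t i, bufsz = sizeof(tbl) / sizeof(tbl[0]) * 48 + 40;
		int pos = 0;

		if (display) {
			*buf = malloc(bufsz);
			if (!*buf)
				return -1;
			pos += snprintf(*buf + pos, bufsz - pos,
					"register values:\n");
			for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
				pos += snprintf(*buf + pos, bufsz - pos,
						"  0x%08x: 0x%08x\n",
						tbl[i], read_reg(tbl[i]));
			return pos;	/* bytes formatted, caller frees *buf */
		}
		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
			fprintf(stderr, "  0x%08x: 0x%08x\n",
				tbl[i], read_reg(tbl[i]));
		return 0;
	}

	int main(void)
	{
		char *buf = NULL;
		int n = dump_regs(&buf, 1);

		if (n > 0) {
			fwrite(buf, 1, (size_t)n, stdout);
			free(buf);
		}
		return 0;
	}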
+
+static void iwl_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_is_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+ /*
+ * There is no easy and better way to force a radio reset;
+ * the only known method is switching the channel, which will
+ * force the radio to be reset and re-tuned.
+ * An internal short scan (single channel) operation is used
+ * to achieve this.
+ * The driver should reset the radio when too many consecutive
+ * beacons are missed or any other uCode error is detected.
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_internal_short_hw_scan(priv);
+}
+
+
+int iwl_force_reset(struct iwl_priv *priv, int mode)
+{
+ struct iwl_force_reset *force_reset;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (mode >= IWL_MAX_FORCE_RESET) {
+ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+ return -EINVAL;
+ }
+ force_reset = &priv->force_reset[mode];
+ force_reset->reset_request_count++;
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ IWL_DEBUG_INFO(priv, "force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+ switch (mode) {
+ case IWL_RF_RESET:
+ iwl_force_rf_reset(priv);
+ break;
+ case IWL_FW_RESET:
+ IWL_ERR(priv, "On demand firmware reload\n");
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+ break;
+ }
+ return 0;
+}
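iwl_force_reset() above debounces requests with jiffies and time_after(): a new reset arriving within reset_duration of the previous one is rejected with -EAGAIN. A userspace analogue of that debounce, using CLOCK_MONOTONIC in place of jiffies (the names and the two-second spacing below are illustrative, not taken from the driver):

	/* Userspace analogue of the jiffies/time_after() debounce in
	 * iwl_force_reset().  Not driver code. */
	#include <stdio.h>
	#include <time.h>

	struct force_reset {
		time_t last;		/* 0 = never requested */
		double duration;	/* minimum spacing, seconds */
		unsigned requests, rejects, successes;
	};

	static int try_force_reset(struct force_reset *fr)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		fr->requests++;
		if (fr->last && difftime(ts.tv_sec, fr->last) < fr->duration) {
			fr->rejects++;
			return -1;	/* corresponds to -EAGAIN above */
		}
		fr->successes++;
		fr->last = ts.tv_sec;
		/* ... perform the actual reset here ... */
		return 0;
	}

	int main(void)
	{
		struct force_reset fr = { 0, 2.0, 0, 0, 0 };

		printf("first:  %d\n", try_force_reset(&fr));	/* accepted */
		printf("second: %d\n", try_force_reset(&fr));	/* rejected */
		return 0;
	}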
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..36940a9ec6b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <generated/utsrelease.h>
-
/************************
* forward declarations *
************************/
@@ -72,8 +70,8 @@ struct iwl_host_cmd;
struct iwl_cmd;
-#define IWLWIFI_VERSION UTS_RELEASE "-k"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -119,6 +117,7 @@ struct iwl_apm_ops {
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
void (*set_ct_kill)(struct iwl_priv *priv);
+ void (*set_calib_version)(struct iwl_priv *priv);
};
struct iwl_ucode_ops {
@@ -169,8 +168,11 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
void (*dump_nic_error_log)(struct iwl_priv *priv);
+ void (*dump_csr)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -187,6 +189,8 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
+ /* station management */
+ void (*add_bcast_station)(struct iwl_priv *priv);
};
struct iwl_led_ops {
@@ -230,8 +234,9 @@ struct iwl_mod_params {
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
- * @sm_ps_mode: spatial multiplexing power save mode
* @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio re-tuning when the received plcp error rate is too high
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -287,8 +292,9 @@ struct iwl_cfg {
const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
- u8 sm_ps_mode;
const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
};
/***************************
@@ -332,13 +338,11 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_config_ap(struct iwl_priv *priv);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
@@ -411,13 +415,13 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
-int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_replenish(struct iwl_priv *priv);
void iwl_rx_replenish_now(struct iwl_priv *priv);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl_rx_queue_restock(struct iwl_priv *priv);
+void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
@@ -425,6 +429,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -436,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
-int iwl_txq_ctx_reset(struct iwl_priv *priv);
+int iwl_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
@@ -445,9 +452,13 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
@@ -495,6 +506,8 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+void iwl_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
@@ -525,14 +538,6 @@ int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
void iwl_calib_free_results(struct iwl_priv *priv);
-/*******************************************************************************
- * Spectrum Measureemtns in iwl-spectrum.c
- ******************************************************************************/
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
-#else
-static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
-#endif
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
@@ -581,7 +586,10 @@ int iwl_pci_resume(struct pci_dev *pdev);
* Error Handling Debugging
******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+int iwl_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+void iwl_dump_csr(struct iwl_priv *priv);
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
#else
@@ -601,7 +609,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
/*************** DRIVER STATUS FUNCTIONS *****/
#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
#define STATUS_CT_KILL 4
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a7bfae01f19b..808b7146bead 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,8 +77,7 @@
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
*
- * NOTE: Newer devices using one-time-programmable (OTP) memory
- * require device to be awake in order to read this memory
+ * NOTE: Device does need to be awake in order to read this memory
* via CSR_EEPROM and CSR_OTP registers
*/
#define CSR_BASE (0x000)
@@ -111,9 +110,8 @@
/*
* EEPROM and OTP (one-time-programmable) memory reads
*
- * NOTE: For (newer) devices using OTP, device must be awake, initialized via
- * apm_ops.init() in order to read. Older devices (3945/4965/5000)
- * use EEPROM and do not require this.
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
*/
#define CSR_EEPROM_REG (CSR_BASE+0x02c)
#define CSR_EEPROM_GP (CSR_BASE+0x030)
@@ -371,7 +369,7 @@
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
-
+#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c9..1c7b53d511c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -67,57 +67,6 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-struct iwl_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *dir_data;
- struct dentry *dir_debug;
- struct dentry *dir_rf;
- struct dir_data_files {
- struct dentry *file_sram;
- struct dentry *file_nvm;
- struct dentry *file_stations;
- struct dentry *file_log_event;
- struct dentry *file_channels;
- struct dentry *file_status;
- struct dentry *file_interrupt;
- struct dentry *file_qos;
- struct dentry *file_thermal_throttling;
- struct dentry *file_led;
- struct dentry *file_disable_ht40;
- struct dentry *file_sleep_level_override;
- struct dentry *file_current_sleep_command;
- } dbgfs_data_files;
- struct dir_rf_files {
- struct dentry *file_disable_sensitivity;
- struct dentry *file_disable_chain_noise;
- struct dentry *file_disable_tx_power;
- } dbgfs_rf_files;
- struct dir_debug_files {
- struct dentry *file_rx_statistics;
- struct dentry *file_tx_statistics;
- struct dentry *file_traffic_log;
- struct dentry *file_rx_queue;
- struct dentry *file_tx_queue;
- struct dentry *file_ucode_rx_stats;
- struct dentry *file_ucode_tx_stats;
- struct dentry *file_ucode_general_stats;
- struct dentry *file_sensitivity;
- struct dentry *file_chain_noise;
- struct dentry *file_tx_power;
- struct dentry *file_power_save_status;
- struct dentry *file_clear_ucode_statistics;
- struct dentry *file_clear_traffic_statistics;
- } dbgfs_debug_files;
- u32 sram_offset;
- u32 sram_len;
-};
-
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
-#endif
-
#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
@@ -126,9 +75,10 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-#ifndef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699daf..b6e1b0ebe230 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +26,7 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
@@ -41,43 +42,28 @@
#include "iwl-calib.h"
/* create and remove of files */
-#define DEBUGFS_ADD_DIR(name, parent) do { \
- dbgfs->dir_##name = debugfs_create_dir(#name, parent); \
- if (!(dbgfs->dir_##name)) \
- goto err; \
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, mode, \
- dbgfs->dir_##parent, priv, \
- &iwl_dbgfs_##name##_ops); \
- if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
- goto err; \
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_x32(#name, S_IRUSR, dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_REMOVE(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0);
-
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -125,7 +111,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
@@ -184,7 +170,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
int cnt;
@@ -232,28 +218,28 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
ssize_t ret;
int i;
int pos = 0;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
size_t bufsz;
/* default is to dump the entire data segment */
- if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
- priv->dbgfs->sram_offset = 0x800000;
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs->sram_len = priv->ucode_init_data.len;
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
else
- priv->dbgfs->sram_len = priv->ucode_data.len;
+ priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10;
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs->sram_len);
+ priv->dbgfs_sram_len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs->sram_offset);
- for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
- priv->dbgfs->sram_len - i);
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
if (i < 4) {
switch (i) {
case 1:
@@ -293,11 +279,11 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs->sram_offset = offset;
- priv->dbgfs->sram_len = len;
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
} else {
- priv->dbgfs->sram_offset = 0;
- priv->dbgfs->sram_len = 0;
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
}
return count;
@@ -306,7 +292,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
int max_sta = priv->hw_params.max_stations;
char *buf;
@@ -376,7 +362,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
loff_t *ppos)
{
ssize_t ret;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
@@ -420,6 +406,24 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf = NULL;
+ int pos = 0;
+ ssize_t ret;
+
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
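The new log_event read handler formats the whole event log into a temporary buffer and lets simple_read_from_buffer() serve it at the reader's file offset. A plain-C analogue of that offset-window copy, shown only to illustrate the semantics the kernel helper provides (read_from_buffer() here is hypothetical, not the kernel function):

	/* Userspace analogue of the simple_read_from_buffer() pattern used by
	 * iwl_dbgfs_log_event_read(): copy at most "count" bytes starting at
	 * *ppos, then advance *ppos.  Not the kernel helper. */
	#include <stdio.h>
	#include <string.h>

	static size_t read_from_buffer(char *to, size_t count, long *ppos,
				       const char *from, size_t available)
	{
		size_t pos = (size_t)*ppos, n;

		if (pos >= available)
			return 0;		/* EOF */
		n = available - pos;
		if (n > count)
			n = count;
		memcpy(to, from + pos, n);
		*ppos += (long)n;
		return n;
	}

	int main(void)
	{
		const char log[] = "event 1\nevent 2\nevent 3\n";
		char chunk[8];
		long pos = 0;
		size_t n;

		/* read the "file" in small chunks, as a debugfs reader would */
		while ((n = read_from_buffer(chunk, sizeof(chunk), &pos,
					     log, strlen(log))) > 0)
			fwrite(chunk, 1, n, stdout);
		return 0;
	}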
+
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -436,7 +440,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- priv->cfg->ops->lib->dump_nic_event_log(priv, true);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
return count;
}
@@ -446,7 +451,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct ieee80211_channel *channels = NULL;
const struct ieee80211_supported_band *supp_band = NULL;
int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,15 +524,13 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
- test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
test_bit(STATUS_INT_ENABLED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
@@ -567,7 +570,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -654,7 +657,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -677,7 +680,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -703,7 +706,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
char buf[100];
@@ -763,7 +766,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -811,7 +814,9 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
priv->power_data.debug_sleep_level_override = value;
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -820,7 +825,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[10];
int pos, value;
const size_t bufsz = sizeof(buf);
@@ -838,7 +843,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[200];
int pos = 0, i;
const size_t bufsz = sizeof(buf);
@@ -859,7 +864,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +981,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
@@ -1022,7 +1027,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_rx_queue *rxq = &priv->rxq;
char buf[256];
int pos = 0;
@@ -1063,36 +1068,33 @@ static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
return p;
}
+static const char ucode_stats_header[] =
+ "%-32s current acumulative delta max\n";
+static const char ucode_stats_short_format[] =
+ " %-30s %10u\n";
+static const char ucode_stats_format[] =
+ " %-30s %10u %10u %10u %10u\n";
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 20 +
- sizeof(struct statistics_rx_non_phy) * 20 +
- sizeof(struct statistics_rx_ht_phy) * 20 + 400;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm;
- struct statistics_rx_phy *cck, *accum_cck;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_ht_phy *ht, *accum_ht;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1111,264 +1113,401 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_cck = &priv->accum_statistics.rx.cck;
accum_general = &priv->accum_statistics.rx.general;
accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_cts:",
le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_ack:",
le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_bssid_frames:\t%u\t\t\t%u\n",
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "filtered_frames:\t%u\t\t\t%u\n",
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "filtered_frames:",
le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_beacons:",
le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "num_missed_bcon:\t%u\t\t\t%u\n",
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- "adc_rx_saturation_time:\t%u\t\t\t%u\n",
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ina_detect_search_tm:\t%u\t\t\t%u\n",
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "interference_data_flag:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_load:\t\t%u\t\t\t%u\n",
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_load:",
le32_to_cpu(general->channel_load),
- accum_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_false_alarms:\t%u\t\t\t%u\n",
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_a:\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c);
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1379,26 +1518,16 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
ssize_t ret;
- struct statistics_tx *tx, *accum_tx;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1411,106 +1540,148 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
*/
tx = &priv->statistics.tx;
accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "preamble:",
le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_timeout:",
le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_nxt_frame_mismatch:"
- "\t%u\t\t\t%u\n",
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_missing_nxt_frame:"
- "\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg underrun:\t\t\t%u\t\t\t%u\n",
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg underrun:",
le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt);
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1521,28 +1692,19 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_general) * 4 + 250;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
ssize_t ret;
struct statistics_general *general, *accum_general;
- struct statistics_dbg *dbg, *accum_dbg;
- struct statistics_div *div, *accum_div;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1557,52 +1719,78 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
dbg = &priv->statistics.general.dbg;
div = &priv->statistics.general.div;
accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature:",
le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature_m:",
le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_check:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_check:",
le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_count:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_count:",
le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sleep_time:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sleep_time:",
le32_to_cpu(general->sleep_time),
- accum_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_out:\t\t\t%u\t\t\t%u\n",
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_out:",
le32_to_cpu(general->slots_out),
- accum_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_idle:\t\t\t%u\t\t\t%u\n",
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_idle:",
le32_to_cpu(general->slots_idle),
- accum_general->slots_idle);
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "exec_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->exec_time), accum_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "probe_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->probe_time), accum_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_enable_counter:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter);
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1612,7 +1800,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1693,7 +1881,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1751,26 +1939,15 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[128];
int pos = 0;
- ssize_t ret;
const size_t bufsz = sizeof(buf);
struct statistics_tx *tx;
if (!iwl_is_alive(priv))
pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
else {
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv, "Error sending statistics request: %zd\n",
- ret);
- return -EAGAIN;
- }
tx = &priv->statistics.tx;
if (tx->tx_power.ant_a ||
tx->tx_power.ant_b ||
@@ -1802,7 +1979,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[60];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1845,6 +2022,262 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the uCode trace timer to fire in UCODE_TRACE_PERIOD msecs */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf = NULL;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int scan;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &scan) != 1)
+ return -EINVAL;
+
+ iwl_internal_short_hw_scan(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->plcp_delta_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+ else
+ priv->cfg->plcp_delta_threshold = plcp;
+ return count;
+}
+
+static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int i, pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct iwl_force_reset *force_reset;
+
+ for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+ force_reset = &priv->force_reset[i];
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Force reset method %d\n", i);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\treset duration: %lu\n",
+ force_reset->reset_duration);
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int reset, ret;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &reset) != 1)
+ return -EINVAL;
+ switch (reset) {
+ case IWL_RF_RESET:
+ case IWL_FW_RESET:
+ ret = iwl_force_reset(priv, reset);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret ? ret : count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +2292,13 @@ DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_WRITE_FILE_OPS(internal_scan);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
/*
* Create the debugfs files and directories
@@ -1866,69 +2306,74 @@ DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
*/
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
- struct iwl_debugfs *dbgfs;
struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- int ret = 0;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
- dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- ret = -ENOMEM;
- goto err;
- }
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, phyd);
- if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
- ret = -ENOENT;
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
goto err;
- }
- DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
- DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
- DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
- DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
}
- DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_ADD_BOOL(disable_tx_power, rf,
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
err:
- IWL_ERR(priv, "Can't open the debugfs directory\n");
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
iwl_dbgfs_unregister(priv);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(iwl_dbgfs_register);
@@ -1938,56 +2383,11 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
*/
void iwl_dbgfs_unregister(struct iwl_priv *priv)
{
- if (!priv->dbgfs)
+ if (!priv->debugfs_dir)
return;
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sleep_level_override);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_current_sleep_command);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
- DEBUGFS_REMOVE(priv->dbgfs->dir_data);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_traffic_log);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_ucode_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_traffic_statistics);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_rx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_tx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_general_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_chain_noise);
- }
- DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
- DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
- kfree(priv->dbgfs);
- priv->dbgfs = NULL;
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
}
EXPORT_SYMBOL(iwl_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2673e9a4db92..ef1720a852e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -512,6 +512,7 @@ struct iwl_ht_config {
bool is_ht;
bool is_40mhz;
bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
/* BSS related data */
u8 extension_chan_offset;
u8 ht_protection;
@@ -711,7 +712,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
- return q->write_ptr > q->read_ptr ?
+ return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
@@ -984,6 +985,74 @@ struct iwl_switch_rxon {
__le16 channel;
};
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging, if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dumping uCode events
+ * @wraps_once_count: counter for wrap detected once when dumping uCode events
+ * @wraps_more_count: counter for wrap detected more than once
+ * when dumping uCode events
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
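
The ucode_trace flag and the counters above are driven by a periodic timer (see the ucode_trace timer_list added to struct iwl_priv further down in this diff). A minimal sketch of such a handler, assuming the older unsigned-long timer callback signature; the function name and the dump step are illustrative, not part of this patch:

/* Illustrative only: a handler for priv->ucode_trace that re-arms itself
 * every UCODE_TRACE_PERIOD msecs while continuous tracing is enabled. */
static void iwl_bg_ucode_trace_sketch(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (!priv->event_log.ucode_trace)
		return;

	/* ... read the newest event-log entries from device SRAM here ... */

	mod_timer(&priv->ucode_trace,
		  jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
}
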
+/*
+ * host interrupt timeout value
+ * used when setting the interrupt coalescing timer
+ * the CSR_INT_COALESCING register is 8 bits wide, in units of 32 usecs
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
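
Since CSR_INT_COALESCING counts 32-usec ticks, converting a wall-clock timeout into a register value is a single divide and clamp. A minimal sketch, not part of the patch (the helper name is made up):

/* Illustrative only: turn a timeout in usecs into the 8-bit
 * CSR_INT_COALESCING value, clamped to the limits defined above. */
static u32 iwl_usecs_to_int_coalescing(u32 usecs)
{
	u32 val = usecs / 32;		/* register unit is 32 usecs */

	if (val > IWL_HOST_INT_TIMEOUT_MAX)
		val = IWL_HOST_INT_TIMEOUT_MAX;
	return val;			/* e.g. 2048 usecs -> 0x40 */
}
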
+/*
+ * This is the threshold value of the PLCP error rate per 100 ms. It is
+ * used to set and to check the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
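
The thresholds are expressed as PLCP errors per 100 ms, so any check built on plcp_jiffies (added to struct iwl_priv below) has to scale the observed delta by the elapsed time. A sketch under that assumption; the helper and its arguments are illustrative, not part of the patch:

/* Illustrative only: true if 'plcp_delta' errors observed over 'msecs'
 * milliseconds exceed the configured per-100ms threshold. */
static bool iwl_plcp_threshold_exceeded(u32 plcp_delta, unsigned long msecs,
					u32 threshold)
{
	if (!msecs || !threshold)	/* nothing to compare against */
		return false;
	return plcp_delta * 100 > threshold * msecs;
}
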
+#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+enum iwl_reset {
+ IWL_RF_RESET = 0,
+ IWL_FW_RESET,
+ IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
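
A sketch of how the bookkeeping in struct iwl_force_reset might gate back-to-back requests, where reset_duration would typically hold IWL_DELAY_NEXT_FORCE_RF_RESET or IWL_DELAY_NEXT_FORCE_FW_RELOAD; the real iwl_force_reset() is not part of this hunk, so the helper below is illustrative only:

/* Illustrative only: count the request and reject it if it arrives
 * inside the hold-off window left by the previous forced reset. */
static bool iwl_force_reset_allowed(struct iwl_force_reset *force_reset)
{
	force_reset->reset_request_count++;

	if (force_reset->last_force_reset_jiffies &&
	    time_after(force_reset->last_force_reset_jiffies +
		       force_reset->reset_duration, jiffies)) {
		force_reset->reset_reject_count++;
		return false;
	}

	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	return true;
}
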
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1004,13 +1073,19 @@ struct iwl_priv {
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-#if defined(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
-#endif
+
/* ucode beacon time */
u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
+
+ /* force reset */
+ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
/* we allocate array of iwl4965_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
@@ -1029,7 +1104,6 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long last_scan_jiffies;
unsigned long next_scan_jiffies;
unsigned long scan_start;
unsigned long scan_pass_start;
@@ -1037,6 +1111,7 @@ struct iwl_priv {
void *scan;
int scan_bands;
struct cfg80211_scan_request *scan_request;
+ bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1045,6 +1120,7 @@ struct iwl_priv {
spinlock_t hcmd_lock; /* protect hcmd */
spinlock_t reg_lock; /* protect hw register access */
struct mutex mutex;
+ struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
/* basic pci-network driver stuff */
struct pci_dev *pci_dev;
@@ -1056,6 +1132,7 @@ struct iwl_priv {
u8 rev_id;
/* uCode images, save to reload in case of failure */
+ int fw_index; /* firmware we're trying to load */
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
struct fw_desc ucode_code; /* runtime inst */
@@ -1066,6 +1143,7 @@ struct iwl_priv {
struct fw_desc ucode_boot; /* bootstrap inst */
enum ucode_type ucode_type;
u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
struct iwl_rxon_time_cmd rxon_timing;
@@ -1135,6 +1213,8 @@ struct iwl_priv {
struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
#endif
/* context information */
@@ -1168,7 +1248,7 @@ struct iwl_priv {
u32 last_beacon_time;
u64 last_tsf;
- /* eeprom */
+ /* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
@@ -1207,20 +1287,16 @@ struct iwl_priv {
struct workqueue_struct *workqueue;
- struct work_struct up;
struct work_struct restart;
- struct work_struct calibrated_work;
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct update_link_led;
- struct work_struct auth_work;
- struct work_struct report_work;
struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
struct work_struct ct_enter;
struct work_struct ct_exit;
+ struct work_struct start_internal_scan;
struct tasklet_struct irq_tasklet;
@@ -1251,7 +1327,8 @@ struct iwl_priv {
u16 rx_traffic_idx;
u8 *tx_traffic;
u8 *rx_traffic;
- struct iwl_debugfs *dbgfs;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#endif /* CONFIG_IWLWIFI_DEBUG */
@@ -1261,6 +1338,7 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
bool hw_ready;
/*For 3945*/
#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1346,8 @@ struct iwl_priv {
struct iwl3945_notif_statistics statistics_39;
u32 sta_supp_rates;
+
+ struct iwl_event_log event_log;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
@@ -1353,4 +1433,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
+static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
+{
+ __free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+
+static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+ free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1da15d..2ffc2edbf4f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,7 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
+#include "iwl-dev.h"
+
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
@@ -11,4 +39,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 21361968ab7e..ae7319bb3a99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,8 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
-#include "iwl-dev.h"
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
@@ -65,6 +90,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
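
As with any TRACE_EVENT, callers use the generated trace_<name>() wrappers. The sketch below shows how the continuous event-log path could emit the two tracepoints above; the wrapper function itself is illustrative, not part of the patch:

/* Illustrative only: report one continuous-log event, and report a wrap
 * of the uCode event buffer whenever the wrap counter advances. */
static void iwl_trace_cont_event_sketch(struct iwl_priv *priv, u32 time,
					u32 data, u32 ev, u32 num_wraps,
					u32 next_entry)
{
	trace_iwlwifi_dev_ucode_cont_event(priv, time, data, ev);

	if (num_wraps != priv->event_log.num_wraps) {
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		priv->event_log.num_wraps = num_wraps;
		priv->event_log.next_entry = next_entry;
	}
}
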
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3946e5c03f81..fb5bb487f3bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
@@ -370,7 +371,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
@@ -404,7 +405,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
- *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
+ *eeprom_data = cpu_to_le16(r >> 16);
return 0;
}
@@ -413,7 +414,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
*/
static bool iwl_is_otp_empty(struct iwl_priv *priv)
{
- u16 next_link_addr = 0, link_value;
+ u16 next_link_addr = 0;
+ __le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
@@ -443,7 +445,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
static int iwl_find_otp_image(struct iwl_priv *priv,
u16 *validblockaddr)
{
- u16 next_link_addr = 0, link_value = 0, valid_addr;
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
@@ -463,7 +466,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* check for more block on the link list
*/
valid_addr = next_link_addr;
- next_link_addr = link_value * sizeof(u16);
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +500,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
int iwl_eeprom_init(struct iwl_priv *priv)
{
- u16 *e;
+ __le16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
int sz;
int ret;
@@ -516,12 +519,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = -ENOMEM;
goto alloc_err;
}
- e = (u16 *)priv->eeprom;
+ e = (__le16 *)priv->eeprom;
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- /* OTP reads require powered-up chip */
- priv->cfg->ops->lib->apm_ops.init(priv);
- }
+ priv->cfg->ops->lib->apm_ops.init(priv);
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
@@ -562,7 +562,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
for (addr = validblockaddr; addr < validblockaddr + sz;
addr += sizeof(u16)) {
- u16 eeprom_data;
+ __le16 eeprom_data;
ret = iwl_read_otp_word(priv, addr, &eeprom_data);
if (ret)
@@ -570,13 +570,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[cache_addr / 2] = eeprom_data;
cache_addr += sizeof(u16);
}
-
- /*
- * Now that OTP reads are complete, reset chip to save
- * power until we load uCode during "up".
- */
- priv->cfg->ops->lib->apm_ops.stop(priv);
-
} else {
/* eeprom is an array of 16bit values */
for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +587,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
goto done;
}
r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
+ e[addr / 2] = cpu_to_le16(r >> 16);
}
}
ret = 0;
@@ -603,6 +596,8 @@ done:
err:
if (ret)
iwl_eeprom_free(priv);
+ /* Reset chip to save power until we load uCode during "up". */
+ priv->cfg->ops->lib->apm_ops.stop(priv);
alloc_err:
return ret;
}
@@ -755,7 +750,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
ch_info->ht40_eeprom = *eeprom_ch;
ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
ch_info->ht40_flags = eeprom_ch->flags;
- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 5cd2b66bbe45..8171c701e4e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
*
*/
struct iwl_eeprom_enhanced_txpwr {
- u16 common;
+ __le16 common;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
+/* 6000 regulatory - indirect access */
+#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
+ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
+
/* 6000 and up regulatory tx power - indirect access */
/* max. elements per section */
#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
@@ -360,7 +364,7 @@ struct iwl_eeprom_calib_subband_info {
struct iwl_eeprom_calib_info {
u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
u8 saturation_power52; /* half-dBm */
- s16 voltage; /* signed */
+ __le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 65fa8a69fd5a..113c3669b9ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -379,6 +379,25 @@
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessed to internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with the previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
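
The write-one-to-clear semantics described above translate into a simple read/report/write-back pattern. A sketch, assuming the driver's iwl_read_direct32()/iwl_write_direct32() accessors; the function itself is illustrative, not part of the patch:

/* Illustrative only: decode and clear the TSSR Tx error bits. */
static void iwl_dump_tssr_errors(struct iwl_priv *priv)
{
	u32 err = iwl_read_direct32(priv, FH_TSSR_TX_ERROR_REG);

	if (err & BIT(31))
		IWL_ERR(priv, "TSSR: internal memory address error\n");
	if (err & BIT(30))
		IWL_ERR(priv, "TSSR: host sent wrong number of dwords\n");
	if (err & 0x0001fe00)		/* bits 16-9: per-channel TRB overrun */
		IWL_ERR(priv, "TSSR: TRB overrun, channels 0x%x\n",
			(err >> 9) & 0xff);
	if (err & 0xff)			/* bits 7-0: per-channel TxCredit error */
		IWL_ERR(priv, "TSSR: TxCredit error, channels 0x%x\n",
			err & 0xff);

	/* all error bits are write-one-to-clear */
	iwl_write_direct32(priv, FH_TSSR_TX_ERROR_REG, err);
}
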
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a23165948202..73681c4fefe7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(RADAR_NOTIFICATION);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
@@ -165,15 +164,13 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* A synchronous command can not have a callback set. */
BUG_ON(cmd->callback);
- if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: Already sending a host command\n",
+ IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
- ret = -EBUSY;
- goto out;
- }
+ mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
+ get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
if (cmd_idx < 0) {
@@ -194,6 +191,8 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
@@ -234,11 +233,11 @@ cancel:
}
fail:
if (cmd->reply_page) {
- free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}
out:
- clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
+ mutex_unlock(&priv->sync_cmd_mutex);
return ret;
}
EXPORT_SYMBOL(iwl_send_cmd_sync);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index bd0b12efb5c7..51a67fb2e185 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
if (desc->v_addr)
- pci_free_consistent(pci_dev, desc->len,
- desc->v_addr, desc->p_addr);
+ dma_free_coherent(&pci_dev->dev, desc->len,
+ desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
- desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index e552d4c4bdbe..16eb3ced9b30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -31,6 +31,7 @@
#include <linux/io.h>
+#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46c7a95b88f0..a6f9c918aabc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index f47f053f02ea..49a70baa3fb6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8ccc0bb1d9ed..548dac2f6a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
@@ -303,13 +304,12 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
sizeof(struct iwl_powertable_cmd), cmd);
}
-
+/* priv->mutex must be held */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
int ret = 0;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) &&
- (priv->hw->conf.flags & IEEE80211_CONF_PS);
+ bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
bool update_chains;
struct iwl_powertable_cmd cmd;
int dtimper;
@@ -319,7 +319,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
if (priv->vif)
- dtimper = priv->vif->bss_conf.dtim_period;
+ dtimper = priv->hw->conf.ps_dtim_period;
else
dtimper = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 310c32e8f698..5db91c10dcc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6d95832db06d..d2d2a9174900 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6090bc15a6d5..e5eb339107dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -28,6 +28,7 @@
*****************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
@@ -123,12 +124,11 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
-int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
unsigned long flags;
u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
u32 reg;
- int ret = 0;
spin_lock_irqsave(&q->lock, flags);
@@ -161,7 +161,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
- return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
/**
@@ -184,14 +183,13 @@ static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-int iwl_rx_queue_restock(struct iwl_priv *priv)
+void iwl_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
int write;
- int ret = 0;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@@ -220,10 +218,8 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- ret = iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
}
-
- return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
@@ -345,17 +341,15 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -364,7 +358,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
spin_lock_init(&rxq->lock);
@@ -372,12 +366,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+ GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma);
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@@ -394,8 +389,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
return 0;
err_rb:
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
err_bd:
return -ENOMEM;
}
@@ -416,9 +411,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -477,8 +470,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 0x40);
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
@@ -503,9 +496,10 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_missed_beacon_notif *missed_beacon;
missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consequtive_missed_beacons),
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -515,6 +509,24 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
+
+
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
@@ -568,15 +580,24 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
int i;
__le32 *prev_stats;
u32 *accum_stats;
+ u32 *delta, *max_delta;
prev_stats = (__le32 *)&priv->statistics;
accum_stats = (u32 *)&priv->accum_statistics;
+ delta = (u32 *)&priv->delta_statistics;
+ max_delta = (u32 *)&priv->max_delta;
for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
- *accum_stats += (le32_to_cpu(*stats) -
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
/* reset accumulative statistics for "no-counter" type statistics */
priv->accum_statistics.general.temperature =
@@ -596,11 +617,15 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
+#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(priv->statistics),
@@ -615,6 +640,56 @@ void iwl_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_DEBUG
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
+ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
+ }
+
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
set_bit(STATUS_STATISTICS, &priv->status);
@@ -642,11 +717,13 @@ void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
- memset(&priv->statistics, 0,
- sizeof(struct iwl_notif_statistics));
#ifdef CONFIG_IWLWIFI_DEBUG
memset(&priv->accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
+ memset(&priv->delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
@@ -654,47 +731,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
@@ -973,7 +1009,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+ le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+ (ieee80211_is_data_qos(fc) &&
+ *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
@@ -1105,11 +1144,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (iwl_is_associated(priv) &&
!test_bit(STATUS_SCANNING, &priv->status)) {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
- rx_status.noise);
} else {
rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
}
/* Reset beacon noise level if not associated. */
@@ -1122,8 +1158,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+ rx_status.signal, rx_status.noise,
(unsigned long long)rx_status.mactime);
/*
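
The PLCP check added to iwl_rx_statistics() above reduces to an error-rate comparison: the OFDM and OFDM-HT error deltas since the previous statistics notification, scaled by the elapsed time, are tested against the per-device threshold. A hedged, self-contained sketch of that arithmetic (function name and numbers are illustrative, not the driver's):

#include <stdbool.h>

/* true when the combined PLCP error rate exceeds "threshold" errors
 * per 100 milliseconds, mirroring the check in the handler above */
static bool plcp_rate_exceeded(unsigned int ofdm_delta,
			       unsigned int ofdm_ht_delta,
			       unsigned int elapsed_msec,
			       unsigned int threshold)
{
	unsigned int combined = ofdm_delta + ofdm_ht_delta;

	if (!elapsed_msec)		/* avoid division by zero */
		return false;
	return combined > 0 && (combined * 100) / elapsed_msec > threshold;
}

/* e.g. 60 OFDM + 20 OFDM-HT errors in 200 ms is a rate of 40 per
 * 100 ms; against a threshold of 30 the handler above would call
 * iwl_force_reset(priv, IWL_RF_RESET). */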
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a2b2b8315ff9..12e455a4b90e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -25,6 +25,7 @@
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
@@ -144,8 +145,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
clear_bit(STATUS_SCAN_HW, &priv->status);
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@@ -193,19 +193,17 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec (%dms since last)\n",
+ "elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
- jiffies_to_msecs(elapsed_jiffies
- (priv->last_scan_jiffies, jiffies)));
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -251,8 +249,9 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
goto reschedule;
}
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
+
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
@@ -315,6 +314,72 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan single channel, good enough to reset the RF */
+ /* pick the first valid not in-use channel */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
@@ -405,23 +470,11 @@ EXPORT_SYMBOL(iwl_init_scan_params);
static int iwl_scan_initiate(struct iwl_priv *priv)
{
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- return -EAGAIN;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
- return -EAGAIN;
- }
+ WARN_ON(!mutex_is_locked(&priv->mutex));
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@@ -450,6 +503,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
/* We don't schedule scan within next_scan_jiffies period.
* Avoid scanning during possible EAPOL exchange, return
* success immediately.
@@ -462,15 +527,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* if we just finished scan ask for delay */
- if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
- time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
priv->scan_bands = 0;
for (i = 0; i < req->n_channels; i++)
priv->scan_bands |= BIT(req->channels[i]->band);
@@ -489,6 +545,52 @@ out_unlock:
}
EXPORT_SYMBOL(iwl_mac_hw_scan);
+/*
+ * internal short scan: this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF-related problems
+ */
+void iwl_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_bg_start_internal_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, start_internal_scan);
+
+ mutex_lock(&priv->mutex);
+
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+ goto unlock;
+ }
+
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ goto unlock;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ goto unlock;
+ }
+
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
+ else
+ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+
+ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = true;
+ queue_work(priv->workqueue, &priv->request_scan);
+ unlock:
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL(iwl_internal_short_hw_scan);
+
#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
void iwl_bg_scan_check(struct work_struct *data)
@@ -552,7 +654,8 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (WARN_ON(left < ie_len))
return len;
- memcpy(pos, ies, ie_len);
+ if (ies)
+ memcpy(pos, ies, ie_len);
len += ie_len;
left -= ie_len;
@@ -655,7 +758,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
spin_lock_irqsave(&priv->lock, flags);
interval = priv->beacon_int;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -673,7 +775,9 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -754,24 +858,38 @@ static void iwl_bg_request_scan(struct work_struct *data)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ }
scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
-
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
goto done;
@@ -832,7 +950,12 @@ void iwl_bg_scan_completed(struct work_struct *work)
cancel_delayed_work(&priv->scan_check);
- ieee80211_scan_completed(priv->hw, false);
+ if (!priv->is_internal_short_scan)
+ ieee80211_scan_completed(priv->hw, false);
+ else {
+ priv->is_internal_short_scan = false;
+ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -850,6 +973,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
}
EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
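
The two per-band loops in iwl_get_single_channel_for_scan() above implement the same pick: the first valid channel in the band that is not the one currently tuned in staging_rxon. A condensed, userspace-style sketch of that selection (simplified types and a valid() predicate; not the driver's code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t pick_scan_channel(const uint16_t *channels, size_t count,
				  uint16_t in_use,
				  bool (*valid)(uint16_t))
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (channels[i] == in_use)
			continue;		/* skip the channel in use */
		if (valid(channels[i]))
			return channels[i];	/* first usable candidate */
	}
	return 0;				/* no valid channel found */
}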
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
deleted file mode 100644
index 1ea5cd345fe8..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-spectrum.h"
-
-#define BEACON_TIME_MASK_LOW 0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH 0xFF000000
-#define TIME_UNIT 1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-/* TOOD: was used in sysfs debug interface need to add to mac */
-#if 0
-static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * 1024;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
- rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
- return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & BEACON_TIME_MASK_LOW;
- u32 addon_low = addon & BEACON_TIME_MASK_LOW;
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & BEACON_TIME_MASK_HIGH) +
- (addon & BEACON_TIME_MASK_HIGH);
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << 24);
- } else
- res += (1 << 24);
-
- return cpu_to_le32(res);
-}
-static int iwl_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl4965_spectrum_cmd spectrum;
- struct iwl_rx_packet *res;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .meta.flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
-
- if (iwl_is_associated(priv))
- add_time =
- iwl_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_is_associated(priv))
- spectrum.start_time =
- iwl_add_beacon_time(priv->last_beacon_time,
- add_time,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
- if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (res->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv,
- "Replaced existing measurement: %d\n",
- res->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- dev_kfree_skb_any(cmd.meta.u.skb);
-
- return rc;
-}
-#endif
-
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
-}
-EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
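
The removed iwl-spectrum.c helpers encode time in an 8:24 format: the high byte counts whole beacon intervals, the low 24 bits hold the microseconds remaining inside the current interval. A small standalone example of that conversion with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define BEACON_TIME_MASK_LOW	0x00FFFFFF
#define BEACON_TIME_MASK_HIGH	0xFF000000

static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu)
{
	uint32_t interval = beacon_interval_tu * 1024;	/* TU -> usec */

	if (!interval || !usec)
		return 0;
	return ((usec / interval) & (BEACON_TIME_MASK_HIGH >> 24)) << 24 |
	       ((usec % interval) & BEACON_TIME_MASK_LOW);
}

int main(void)
{
	/* 250000 usec at a 100 TU (102400 usec) beacon interval:
	 * 2 whole intervals plus 45200 usec -> 0x0200b090 */
	printf("0x%08x\n", usecs_to_beacons(250000, 100));
	return 0;
}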
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a77c1e619062..af6babee2891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ieee80211 subsystem header files.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cd6a6901216e..4a6686fa6b36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,46 +80,103 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
}
EXPORT_SYMBOL(iwl_get_ra_sta_id);
+/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
- sta_id);
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
- priv->stations[sta_id].sta.sta.addr);
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
}
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
+ pkt->hdr.flags);
return;
}
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
switch (pkt->u.add_sta.status) {
case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
iwl_sta_ucode_activate(priv, sta_id);
- /* fall through */
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ sta_id);
+ break;
default:
- IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
break;
}
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+
+ /*
+ * Determine if we wanted to modify or add a station;
+ * if adding a station succeeded we have some more initialization
+ * to do when using station notification. TODO
+ */
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static void iwl_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_process_add_sta_resp(priv, addsta, pkt, false);
+
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -145,28 +202,11 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
if (ret == 0) {
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- iwl_sta_ucode_activate(priv, sta->sta.sta_id);
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_WARN(priv, "REPLY_ADD_STA failed\n");
- break;
- }
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ iwl_process_add_sta_resp(priv, sta, pkt, true);
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@@ -299,7 +339,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
}
EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
{
unsigned long flags;
u8 sta_id = iwl_find_station(priv, addr);
@@ -326,7 +366,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
{
struct iwl_rem_sta_cmd *rm_sta =
(struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const char *addr = rm_sta->addr;
+ const u8 *addr = rm_sta->addr;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
@@ -391,9 +431,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
break;
}
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@@ -1007,24 +1045,19 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
u8 sta_id;
- /* Add station to device's station table */
-
/*
- * XXX: This check is definitely not correct, if we're an AP
- * it'll always be false which is not what we want, but
- * it doesn't look like iwlagn is prepared to be an HT
- * AP anyway.
+ * Set HT capabilities. It is ok to set this struct even if not using
+ * HT config: the priv->current_ht_config.is_ht flag will just be false
*/
- if (priv->current_ht_config.is_ht) {
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, addr);
+ if (sta) {
+ memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
+ cur_ht_config = &ht_config;
}
+ rcu_read_unlock();
+ /* Add station to device's station table */
sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
/* Set up default rate scaling table in device's station table */
@@ -1089,6 +1122,7 @@ static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
*/
void iwl_add_bcast_station(struct iwl_priv *priv)
{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
/* Set up default rate scaling table in device's station table */
@@ -1097,6 +1131,16 @@ void iwl_add_bcast_station(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_add_bcast_station);
/**
+ * iwl3945_add_bcast_station - add broadcast station into station table.
+ */
+void iwl3945_add_bcast_station(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
+ iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+}
+EXPORT_SYMBOL(iwl3945_add_bcast_station);
+
+/**
* iwl_get_sta_id - Find station's index within station table
*
* If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 8d052de2d405..2dc35fe28f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,6 +53,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_add_bcast_station(struct iwl_priv *priv);
+void iwl3945_add_bcast_station(struct iwl_priv *priv);
int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_clear_stations_table(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 00da5e152d46..8dd0c036d547 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -60,7 +61,8 @@ static const u16 default_tid_to_tx_fifo[] = {
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr, size_t size)
{
- ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
@@ -73,21 +75,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
if (unlikely(!ptr->addr))
return;
- pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
u32 reg = 0;
- int ret = 0;
int txq_id = txq->q.id;
if (txq->need_update == 0)
- return ret;
+ return;
/* if we're trying to save power */
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -101,7 +102,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq_id, reg);
iwl_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return ret;
+ return;
}
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -114,12 +115,24 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq->q.write_ptr | (txq_id << 8));
txq->need_update = 0;
-
- return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -132,7 +145,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
if (q->n_bd == 0)
@@ -149,8 +162,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
kfree(txq->txb);
@@ -179,20 +192,44 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
+ bool huge = false;
if (q->n_bd == 0)
return;
+ for (; q->read_ptr != q->write_ptr;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ /* we have no way to tell if it is a huge cmd ATM */
+ i = get_cmd_index(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+ huge = true;
+ continue;
+ }
+
+ pci_unmap_single(priv->pci_dev,
+ pci_unmap_addr(&txq->meta[i], mapping),
+ pci_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ if (huge) {
+ i = q->n_window;
+ pci_unmap_single(priv->pci_dev,
+ pci_unmap_addr(&txq->meta[i], mapping),
+ pci_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+
/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
kfree(txq->cmd[i]);
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
/* deallocate arrays */
kfree(txq->cmd);
@@ -283,7 +320,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
struct iwl_tx_queue *txq, u32 id)
{
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
/* Driver private data, only for Tx (not command) queues,
@@ -302,8 +339,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
- txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
goto error;
@@ -352,7 +389,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
for (i = 0; i < actual_slots; i++) {
/* only happens for cmd queue */
if (i == slots_num)
- len += IWL_MAX_SCAN_SIZE;
+ len = IWL_MAX_CMD_SIZE;
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
if (!txq->cmd[i])
@@ -397,6 +434,26 @@ out_free_arrays:
}
EXPORT_SYMBOL(iwl_tx_queue_init);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
/**
* iwl_hw_txq_ctx_free - Free TXQ Context
*
@@ -407,13 +464,13 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
int txq_id;
/* Tx queues */
- if (priv->txq)
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
+ }
iwl_free_dma_ptr(priv, &priv->kw);
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
@@ -424,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
*
* @param priv
* @return error code
*/
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
{
- int ret = 0;
+ int ret;
int txq_id, slots_num;
unsigned long flags;
@@ -490,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
return ret;
}
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+ }
+}
+
/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
*/
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
@@ -511,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
1000);
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Deallocate memory for all Tx queues */
- iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
@@ -730,7 +807,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 tid = 0;
u8 *qc = NULL;
unsigned long flags;
- int ret;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@@ -805,8 +881,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
}
txq = &priv->txq[txq_id];
@@ -948,7 +1026,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- ret = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
/*
@@ -962,9 +1040,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client)
atomic_inc(&sta_priv->pending_frames);
- if (ret)
- return ret;
-
if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
@@ -1003,7 +1078,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
- int len, ret;
+ int len;
u32 idx;
u16 fix_size;
@@ -1012,9 +1087,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* If any of the command structures end up being larger than
* the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
- * we will need to increase the size of the TFD entries */
+ * we will need to increase the size of the TFD entries
+ * Also, check that the command buffer does not exceed the size
+ * of device_cmd and max_cmd_size. */
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
IWL_WARN(priv, "Not sending command - %s KILL\n",
@@ -1035,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
spin_lock_irqsave(&priv->hcmd_lock, flags);
+ /* If this is a huge cmd, mark the huge flag also on the meta.flags
+ * of the _original_ cmd. This is used for DMA mapping clean up.
+ */
+ if (cmd->flags & CMD_SIZE_HUGE) {
+ idx = get_cmd_index(q, q->write_ptr, 0);
+ txq->meta[idx].flags = CMD_SIZE_HUGE;
+ }
+
idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];
@@ -1058,8 +1144,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
len = sizeof(struct iwl_device_cmd);
- len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
-
+ if (idx == TFD_CMD_SLOTS)
+ len = IWL_MAX_CMD_SIZE;
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -1100,10 +1186,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- ret = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return ret ? ret : idx;
+ return idx;
}
static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@@ -1130,6 +1216,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
int nfreed = 0;
+ struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1144,13 +1231,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
tx_info = &txq->txb[txq->q.read_ptr];
iwl_tx_status(priv, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
tx_info->skb[0] = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- nfreed++;
}
return nfreed;
}
@@ -1208,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
+ struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
@@ -1221,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
return;
}
- cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
- cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
- meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+ /* If this is a huge cmd, clear the huge flag on the meta.flags
+ * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
+ * the DMA buffer for the scan (huge) command.
+ */
+ if (huge) {
+ cmd_index = get_cmd_index(&txq->q, index, 0);
+ txq->meta[cmd_index].flags = 0;
+ }
+ cmd_index = get_cmd_index(&txq->q, index, huge);
+ cmd = txq->cmd[cmd_index];
+ meta = &txq->meta[cmd_index];
pci_unmap_single(priv->pci_dev,
pci_unmap_addr(meta, mapping),
@@ -1241,8 +1340,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
+ meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
@@ -1327,7 +1429,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
struct iwl_tid_data *tid_data;
- int ret, write_ptr, read_ptr;
+ int write_ptr, read_ptr;
unsigned long flags;
if (!ra) {
@@ -1379,13 +1481,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_lock_irqsave(&priv->lock, flags);
- ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ /*
+ * the only reason this call can fail is queue number out of range,
+ * which can happen if uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
- if (ret)
- return ret;
-
ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
return 0;
@@ -1558,7 +1664,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
/* calculate mac80211 ampdu sw queue to wake */
int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
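The reclaim loop above now increments nfreed only for QoS data frames, which is what mac80211 accounts per TID; non-QoS frames in the queue no longer inflate the count. A minimal standalone sketch of that check, assuming (as the hunk does) that skb->data begins with the 802.11 header; example_counts_for_tid is an illustrative helper, not a driver function:

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

/* true if a reclaimed frame should count against the TID's in-flight total */
static bool example_counts_for_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* only QoS data frames are tracked per TID by mac80211 */
	return ieee80211_is_data_qos(hdr->frame_control);
}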
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 2a28a1f8b1fe..b55e4f39a9e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
@@ -53,9 +54,10 @@
#include "iwl-commands.h"
#include "iwl-sta.h"
#include "iwl-3945.h"
-#include "iwl-helpers.h"
#include "iwl-core.h"
+#include "iwl-helpers.h"
#include "iwl-dev.h"
+#include "iwl-spectrum.h"
/*
* module name, copyright, version, etc.
@@ -70,14 +72,13 @@
#define VD
#endif
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+/*
+ * Add "s" to indicate that spectrum measurement is included.
+ * We add it here to be consistent with previous releases, in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -352,10 +353,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
if (priv->shared_virt)
- pci_free_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ priv->shared_virt,
+ priv->shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -478,7 +479,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 wait_write_ptr = 0;
u8 *qc = NULL;
unsigned long flags;
- int rc;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
+ if ((iwl_queue_space(q) < q->high_mark))
+ goto drop;
+
spin_lock_irqsave(&priv->lock, flags);
idx = get_cmd_index(q, q->write_ptr, 0);
@@ -660,12 +663,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- rc = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
- if (rc)
- return rc;
-
if ((iwl_queue_space(q) < q->high_mark)
&& priv->mac80211_registered) {
if (wait_write_ptr) {
@@ -686,10 +686,6 @@ drop:
return -1;
}
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
-#include "iwl-spectrum.h"
-
#define BEACON_TIME_MASK_LOW 0x00FFFFFF
#define BEACON_TIME_MASK_HIGH 0xFF000000
#define TIME_UNIT 1024
@@ -812,11 +808,10 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
-#endif
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -959,6 +954,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -972,7 +969,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
@@ -1064,13 +1060,13 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
+static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
- int write, rc;
+ int write;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@@ -1100,12 +1096,8 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- rc = iwl_rx_queue_update_write_ptr(priv, rxq);
- if (rc)
- return rc;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
}
-
- return 0;
}
/**
@@ -1198,9 +1190,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,17 +1237,15 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -1300,47 +1288,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/**
* iwl3945_rx_handle - Main entry function for receiving responses from uCode
*
@@ -1560,8 +1507,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
* iwl3945_print_event_log - Dump error event log to syslog
*
*/
-static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1571,7 +1519,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
@@ -1597,26 +1545,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "0x%08x:%04u\n",
+ time, ev);
+ } else {
+ IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1624,21 +1589,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl3945_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl3945_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl3945_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
} else {
if (next_entry < size)
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1646,7 +1618,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1654,11 +1627,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
if (!iwl3945_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1684,11 +1659,11 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -1700,25 +1675,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl3945_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl3945_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
-
+ return pos;
}
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
@@ -1968,7 +1956,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
{
int i;
- for (i = 0; i < IWL_RATE_COUNT; i++) {
+ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
rates[i].bitrate = iwl3945_rates[i].ieee * 5;
rates[i].hw_value = i; /* Rate scaling will work on indexes */
rates[i].hw_value_short = i;
@@ -3038,18 +3026,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-static void iwl3945_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl3945_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -3066,7 +3042,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl3945_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl3945_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -3570,8 +3552,6 @@ static ssize_t store_filter_flags(struct device *d,
static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
store_filter_flags);
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
static ssize_t show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3641,7 +3621,6 @@ static ssize_t store_measurement(struct device *d,
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
show_measurement, store_measurement);
-#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
static ssize_t store_retry_rate(struct device *d,
struct device_attribute *attr,
@@ -3790,7 +3769,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl3945_bg_up);
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3824,9 +3802,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_dump_errors.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
&dev_attr_measurement.attr,
-#endif
&dev_attr_retry_rate.attr,
&dev_attr_statistics.attr,
&dev_attr_status.attr,
@@ -3852,7 +3828,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.config = iwl_mac_config,
.configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3867,13 +3842,13 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->retry_rate = 1;
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
INIT_LIST_HEAD(&priv->free_frames);
mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
/* Clear the driver's (not device's) station table */
iwl_clear_stations_table(priv);
@@ -3883,6 +3858,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
iwl_reset_qos(priv);
@@ -3936,15 +3912,17 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+ if (!priv->cfg->broken_powersave)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
@@ -4057,10 +4035,18 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ /*
+ * Stop and reset the on-board processor just in case it is in a
+ * strange state, e.g. left stranded by a primary kernel when this
+ * is the kdump kernel trying to start up.
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/***********************
* 4. Read EEPROM
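Several hunks in this file replace pci_free_consistent() with dma_free_coherent(); both free a coherent DMA buffer, the generic API simply takes the struct device embedded in the PCI device. A hedged before/after sketch of the conversion (example_free_shared is illustrative; the real call sites free iwl3945_shared and the RX descriptors shown above):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void example_free_shared(struct pci_dev *pdev, size_t size,
				void *virt, dma_addr_t phys)
{
	/* old, PCI-specific helper:
	 *   pci_free_consistent(pdev, size, virt, phys);
	 * generic DMA API equivalent used by the patch: */
	dma_free_coherent(&pdev->dev, size, virt, phys);
}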
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 7c4f44a9c3e6..a1d45cce0ebc 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/slab.h>
#include <net/cfg80211.h>
#include "iwm.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d76a88..42df7262f9f7 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -41,6 +41,7 @@
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include "iwm.h"
#include "bus.h"
@@ -973,6 +974,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
+ update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
+ update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
+ sizeof(struct iwm_umac_wifi_if));
+
update.command = cpu_to_le32(command);
if (pmksa->bssid)
memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af0552cd75..3dfd9f0e9003 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
#define IWM_CMD_PMKID_FLUSH 3
struct iwm_umac_pmkid_update {
+ struct iwm_umac_wifi_if hdr;
__le32 command;
u8 bssid[ETH_ALEN];
__le16 reserved;
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index be992ca41cf1..cbb81befdb55 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
@@ -89,7 +90,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
for (i = 0; i < __IWM_DM_NR; i++)
iwm->dbg.dbg_module[i] = 0;
- for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+ for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
return 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
index 8091421ee5e5..e80e776b74f7 100644
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.c
+++ b/drivers/net/wireless/iwmc3200wifi/eeprom.c
@@ -37,6 +37,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "iwm.h"
#include "umac.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index d13c8853ee82..229de990379c 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -98,6 +98,7 @@
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include "iwm.h"
#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 5a26bb05a33a..79ffa3b98d73 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
int iwm_down(struct iwm_priv *iwm);
/* TX API */
-u16 iwm_tid_to_queue(u16 tid);
+int iwm_tid_to_queue(u16 tid);
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
void iwm_tx_worker(struct work_struct *work);
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a3a79b5e2898..a855a99e49b8 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -262,7 +262,7 @@ struct iwm_ct_kill_cfg_cmd {
/* Power Management */
#define POWER_TABLE_CMD 0x77
-#define SAVE_RESTORE_ADRESS_CMD 0x78
+#define SAVE_RESTORE_ADDRESS_CMD 0x78
#define REPLY_WATERMARK_CMD 0x79
#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B
#define PD_FLUSH_N_NOTIFICATION 0x7C
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 7f34d6dd3c41..23856d359e12 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -41,6 +41,7 @@
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include <linux/wireless.h>
+#include <linux/slab.h>
#include "iwm.h"
#include "debug.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index e4f0f8705f65..13a69ebf2a94 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -46,6 +46,7 @@
* -> sdio_disable_func()
*/
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include "iwm.h"
#include "commands.h"
@@ -76,7 +77,7 @@ static int iwm_stop(struct net_device *ndev)
*/
static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-u16 iwm_tid_to_queue(u16 tid)
+int iwm_tid_to_queue(u16 tid)
{
if (tid > IWM_UMAC_TID_NR - 2)
return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 1c57c1f72cba..3257d4fad835 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -44,6 +44,7 @@
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <net/iw_handler.h>
#include "iwm.h"
@@ -794,7 +795,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
}
bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss) {
+ if (!bss->bss) {
kfree(bss);
IWM_ERR(iwm, "Couldn't allocate bss\n");
return -ENOMEM;
@@ -868,36 +869,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
struct iwm_umac_notif_mgt_frame *mgt_frame =
(struct iwm_umac_notif_mgt_frame *)buf;
struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
- u8 *ie;
IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
le16_to_cpu(mgt_frame->len));
if (ieee80211_is_assoc_req(mgt->frame_control)) {
- ie = mgt->u.assoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- ie = mgt->u.reassoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- ie = mgt->u.assoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- ie = mgt->u.reassoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
@@ -1117,7 +1117,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
return -EINVAL;
}
- for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+ for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
tid_info = &sta_info->tid_info[bit];
mutex_lock(&tid_info->mutex);
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
if (!stop) {
struct iwm_tx_queue *txq;
- u16 queue = iwm_tid_to_queue(bit);
+ int queue = iwm_tid_to_queue(bit);
if (queue < 0)
continue;
@@ -1534,6 +1534,33 @@ static void classify8023(struct sk_buff *skb)
}
}
+static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ struct sk_buff_head list;
+ struct sk_buff *frame;
+
+ IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
+
+ __skb_queue_head_init(&list);
+ ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
+
+ while ((frame = __skb_dequeue(&list))) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += frame->len;
+
+ frame->protocol = eth_type_trans(frame, ndev);
+ frame->ip_summed = CHECKSUM_NONE;
+ memset(frame->cb, 0, sizeof(frame->cb));
+
+ if (netif_rx_ni(frame) == NET_RX_DROP) {
+ IWM_ERR(iwm, "Packet dropped\n");
+ ndev->stats.rx_dropped++;
+ }
+ }
+}
+
static void iwm_rx_process_packet(struct iwm_priv *iwm,
struct iwm_rx_packet *packet,
struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1575,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
switch (le16_to_cpu(ticket_node->ticket->action)) {
case IWM_RX_TICKET_RELEASE:
IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
- classify8023(skb);
+
iwm_rx_adjust_packet(iwm, packet, ticket_node);
+ skb->dev = iwm_to_ndev(iwm);
+ classify8023(skb);
+
+ if (le16_to_cpu(ticket_node->ticket->flags) &
+ IWM_RX_TICKET_AMSDU_MSK) {
+ iwm_rx_process_amsdu(iwm, skb);
+ break;
+ }
+
ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
if (ret < 0) {
IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
"%d\n", ret);
+ kfree_skb(packet->skb);
break;
}
IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
- skb->dev = iwm_to_ndev(iwm);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
memset(skb->cb, 0, sizeof(skb->cb));
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
if (netif_rx_ni(skb) == NET_RX_DROP) {
IWM_ERR(iwm, "Packet dropped\n");
ndev->stats.rx_dropped++;
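The management-frame hunks above drop the local ie pointer (and its stray double semicolons) and compute the IE length with offsetof() on struct ieee80211_mgmt, reading the offset straight from the frame layout. A small sketch of the same computation for an association request; example_assoc_req_ie_len and frame_len are illustrative names:

#include <linux/ieee80211.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* length of the variable IEs following the fixed assoc_req fields */
static size_t example_assoc_req_ie_len(size_t frame_len)
{
	return frame_len - offsetof(struct ieee80211_mgmt, u.assoc_req.variable);
}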
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index a7ec7eac9137..1eafd6dec3fd 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -63,6 +63,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index 55905f02309c..f6a02f123f31 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -64,6 +64,7 @@
* (i.e. half of the max size). [iwm_tx_worker]
*/
+#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67e..0485c9957575 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
depends on LIBERTAS
---help---
Debugging support.
+
+config LIBERTAS_MESH
+ bool "Enable mesh support"
+ depends on LIBERTAS
+ help
+ This enables Libertas' mesh support, used e.g. by the OLPC project.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a053..45e870e33117 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
libertas-y += debugfs.o
libertas-y += ethtool.o
libertas-y += main.o
-libertas-y += mesh.o
libertas-y += rx.o
libertas-y += scan.o
libertas-y += tx.o
libertas-y += wext.o
+libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
usb8xxx-objs += if_usb.o
libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba8..12a2ef9dacea 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include <net/lib80211.h>
#include "assoc.h"
@@ -390,10 +391,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET) {
- priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+ if (!ret && cmd_action == CMD_ACT_GET)
priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
- }
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
@@ -807,8 +806,7 @@ static int lbs_try_associate(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
+ if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
preamble = RADIO_PREAMBLE_SHORT;
ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +937,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
+ if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
lbs_deb_join("AdhocJoin: Short preamble\n");
preamble = RADIO_PREAMBLE_SHORT;
}
@@ -1049,7 +1046,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
struct assoc_request *assoc_req)
{
struct cmd_ds_802_11_ad_hoc_start cmd;
- u8 preamble = RADIO_PREAMBLE_LONG;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
size_t ratesize = 0;
u16 tmpcap = 0;
int ret = 0;
@@ -1057,11 +1054,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_ASSOC);
- if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- lbs_deb_join("ADHOC_START: Will use short preamble\n");
- preamble = RADIO_PREAMBLE_SHORT;
- }
-
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
goto out;
@@ -1169,11 +1161,11 @@ int lbs_adhoc_stop(struct lbs_private *priv)
static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && match_bss->wpa_ie[0] != WLAN_EID_GENERIC
- && match_bss->rsn_ie[0] != WLAN_EID_RSN
- && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
+ match_bss->rsn_ie[0] != WLAN_EID_RSN &&
+ !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1182,9 +1174,9 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1193,8 +1185,8 @@ static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && secinfo->WPAenabled
- && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
+ if (!secinfo->wep_enabled && secinfo->WPAenabled &&
+ (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
/* privacy bit may NOT be set in some APs like LinkSys WRT54G
&& (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
)
@@ -1219,11 +1211,11 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC)
- && (match_bss->rsn_ie[0] != WLAN_EID_RSN)
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
+ (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1534,8 +1526,8 @@ static int assoc_helper_associate(struct lbs_private *priv,
/* If we're given and 'any' BSSID, try associating based on SSID */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(bssid_any, assoc_req->bssid)
- && compare_ether_addr(bssid_off, assoc_req->bssid)) {
+ if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
+ compare_ether_addr(bssid_off, assoc_req->bssid)) {
ret = assoc_helper_bssid(priv, assoc_req);
done = 1;
}
@@ -1621,11 +1613,9 @@ static int assoc_helper_channel(struct lbs_private *priv,
goto restore_mesh;
}
- if ( assoc_req->secinfo.wep_enabled
- && (assoc_req->wep_keys[0].len
- || assoc_req->wep_keys[1].len
- || assoc_req->wep_keys[2].len
- || assoc_req->wep_keys[3].len)) {
+ if (assoc_req->secinfo.wep_enabled &&
+ (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
+ assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
/* Make sure WEP keys are re-sent to firmware */
set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
}
@@ -1992,14 +1982,14 @@ void lbs_association_worker(struct work_struct *work)
assoc_req->secinfo.auth_mode);
/* If 'any' SSID was specified, find an SSID to associate with */
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)
- && !assoc_req->ssid_len)
+ if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
+ !assoc_req->ssid_len)
find_any_ssid = 1;
/* But don't use 'any' SSID if there's a valid locked BSSID to use */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(assoc_req->bssid, bssid_any)
- && compare_ether_addr(assoc_req->bssid, bssid_off))
+ if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
+ compare_ether_addr(assoc_req->bssid, bssid_off))
find_any_ssid = 0;
}
@@ -2061,13 +2051,6 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if ( test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
- ret = assoc_helper_wep_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
ret = assoc_helper_secinfo(priv, assoc_req);
if (ret)
@@ -2080,18 +2063,31 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
+ /*
+ * v10 FW wants WPA keys to be set/cleared before WEP key operations,
+ * otherwise it will fail to correctly associate to WEP networks.
+ * Other firmware versions don't appear to care.
+ */
+ if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
ret = assoc_helper_wpa_keys(priv, assoc_req);
if (ret)
goto out;
}
+ if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
+ ret = assoc_helper_wep_keys(priv, assoc_req);
+ if (ret)
+ goto out;
+ }
+
/* SSID/BSSID should be the _last_ config option set, because they
* trigger the association attempt.
*/
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
+ if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
int success = 1;
ret = assoc_helper_associate(priv, assoc_req);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4396dccd12ac..ce7bec402a33 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/slab.h>
#include <net/cfg80211.h>
#include "cfg.h"
@@ -172,6 +173,8 @@ int lbs_cfg_register(struct lbs_private *priv)
if (ret < 0)
lbs_pr_err("cannot register wiphy device\n");
+ priv->wiphy_registered = true;
+
ret = register_netdev(priv->dev);
if (ret)
lbs_pr_err("cannot register network device\n");
@@ -190,9 +193,11 @@ void lbs_cfg_free(struct lbs_private *priv)
if (!wdev)
return;
- if (wdev->wiphy) {
+ if (priv->wiphy_registered)
wiphy_unregister(wdev->wiphy);
+
+ if (wdev->wiphy)
wiphy_free(wdev->wiphy);
- }
+
kfree(wdev);
}
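The lbs_cfg_free() hunk above splits teardown so that wiphy_unregister() is only called when registration actually happened, tracked by the new wiphy_registered flag, while wiphy_free() still runs whenever a wiphy was allocated. A hedged sketch of that ordering (example_cfg_free is illustrative, not the driver's function):

#include <linux/slab.h>
#include <linux/types.h>
#include <net/cfg80211.h>

static void example_cfg_free(struct wireless_dev *wdev, bool wiphy_registered)
{
	if (!wdev)
		return;

	/* only undo what registration actually completed */
	if (wiphy_registered)
		wiphy_unregister(wdev->wiphy);
	if (wdev->wiphy)
		wiphy_free(wdev->wiphy);

	kfree(wdev);
}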
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a3..cdb9b9650d73 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -5,6 +5,7 @@
#include <linux/kfifo.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include "host.h"
#include "decl.h"
@@ -143,19 +144,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
cmd.hwifversion, cmd.version);
- /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
- /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
- /* 5.110.22 have mesh command with 0xa3 command id */
- /* 10.0.0.p0 FW brings in mesh config command with different id */
- /* Check FW version MSB and initialize mesh_fw_ver */
- if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
- priv->mesh_fw_ver = MESH_FW_OLD;
- else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
- (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
- priv->mesh_fw_ver = MESH_FW_NEW;
- else
- priv->mesh_fw_ver = MESH_NONE;
-
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some firmware
* returns non-zero high 8 bits here.
@@ -855,9 +843,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
if (priv->fwrelease < 0x09000000) {
switch (preamble) {
case RADIO_PREAMBLE_SHORT:
- if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
- goto out;
- /* Fall through */
case RADIO_PREAMBLE_AUTO:
case RADIO_PREAMBLE_LONG:
cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +996,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = 0;
break;
+#ifdef CONFIG_LIBERTAS_MESH
+
case CMD_BT_ACCESS:
ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
break;
@@ -1019,6 +1006,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
break;
+#endif
+
case CMD_802_11_BEACON_CTRL:
ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
break;
@@ -1317,7 +1306,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
((priv->connect_status == LBS_CONNECTED) ||
- (priv->mesh_connect_status == LBS_CONNECTED))) {
+ lbs_mesh_connected(priv))) {
if (priv->secinfo.WPAenabled ||
priv->secinfo.WPA2enabled) {
/* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef70..cb4138a55fdf 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-/* Mesh related */
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd);
-
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type);
-
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
-
-
/* Commands only used in wext.c, assoc. and scan.c */
int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20a..88f7131d66e9 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -2,6 +2,7 @@
* This file contains the handling of command
* responses as well as events generated by firmware.
*/
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
@@ -240,11 +241,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
/* Now we got response from FW, cancel the command timer */
del_timer(&priv->command_timer);
priv->cmd_timed_out = 0;
- if (priv->nr_retries) {
- lbs_pr_info("Received result %x to command %x after %d retries\n",
- result, curcmd, priv->nr_retries);
- priv->nr_retries = 0;
- }
/* Store the response code to cur_cmd_retcode. */
priv->cur_cmd_retcode = result;
@@ -485,20 +481,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
break;
case MACREG_INT_CODE_MESH_AUTO_STARTED:
- /* Ignore spurious autostart events if autostart is disabled */
- if (!priv->mesh_autostart_enabled) {
- lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
- break;
- }
- lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
- priv->mesh_connect_status = LBS_CONNECTED;
- if (priv->mesh_open) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
- priv->mode = IW_MODE_ADHOC;
- schedule_work(&priv->sync_channel);
+ /* Ignore spurious autostart events */
+ lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
break;
default:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 587b0cb0088d..a48ccaffb288 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -4,6 +4,7 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/lib80211.h>
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5b..ea3f10ef4e00 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
KEY_INFO_WPA_ENABLED = 0x04
};
-/** mesh_fw_ver */
-enum _mesh_fw_ver {
- MESH_NONE = 0, /* MESH is not supported */
- MESH_FW_OLD, /* MESH is supported in FW V5 */
- MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
-};
-
/* Default values for fwt commands. */
#define FWT_DEFAULT_METRIC 0
#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae9..6875e1498bd5 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,18 +36,18 @@ struct lbs_private {
/* CFG80211 */
struct wireless_dev *wdev;
+ bool wiphy_registered;
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
+#ifdef CONFIG_LIBERTAS_MESH
u32 mesh_connect_status;
struct lbs_mesh_stats mstats;
int mesh_open;
- int mesh_fw_ver;
- int mesh_autostart_enabled;
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
- struct work_struct sync_channel;
+#endif
/* Monitor mode */
struct net_device *rtap_net_dev;
@@ -110,7 +110,6 @@ struct lbs_private {
struct list_head cmdpendingq; /* pending command buffers */
wait_queue_head_t cmd_pending;
struct timer_list command_timer;
- int nr_retries;
int cmd_timed_out;
/* Command responses sent from the hardware to the driver */
@@ -176,9 +175,7 @@ struct lbs_private {
struct bss_descriptor *networks;
struct assoc_request * pending_assoc_req;
struct assoc_request * in_progress_assoc_req;
- u16 capability;
uint16_t enablehwauto;
- uint16_t ratebitmap;
/* ADHOC */
u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2b..3804a58d7f4e 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
.get_drvinfo = lbs_ethtool_get_drvinfo,
.get_eeprom = lbs_ethtool_get_eeprom,
.get_eeprom_len = lbs_ethtool_get_eeprom_len,
+#ifdef CONFIG_LIBERTAS_MESH
.get_sset_count = lbs_mesh_ethtool_get_sset_count,
.get_ethtool_stats = lbs_mesh_ethtool_get_stats,
.get_strings = lbs_mesh_ethtool_get_strings,
+#endif
.get_wol = lbs_ethtool_get_wol,
.set_wol = lbs_ethtool_set_wol,
};
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 1f6cb58dd66c..6d55439a7b97 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -22,6 +22,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7a73f625273b..7d1a3c6b6ce0 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index bf4bfbae6227..fe3f08028eb3 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -23,6 +23,8 @@
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 65e174595d12..fcea5741ba62 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -5,6 +5,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#ifdef CONFIG_OLPC
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f21..598080414b17 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -13,6 +13,7 @@
#include <linux/kfifo.h>
#include <linux/stddef.h>
#include <linux/ieee80211.h>
+#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
@@ -123,7 +124,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
if (priv->monitormode == monitor_mode)
return strlen(buf);
if (!priv->monitormode) {
- if (priv->infra_open || priv->mesh_open)
+ if (priv->infra_open || lbs_mesh_open(priv))
return -EBUSY;
if (priv->mode == IW_MODE_INFRA)
lbs_cmd_80211_deauthenticate(priv,
@@ -319,15 +320,18 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
{
int i = nr_addrs;
struct dev_mc_list *mc_list;
+ int cnt;
if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
return nr_addrs;
netif_addr_lock_bh(dev);
- for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+ cnt = netdev_mc_count(dev);
+ netdev_for_each_mc_addr(mc_list, dev) {
if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
mc_list->dmi_addr);
+ cnt--;
continue;
}
@@ -337,9 +341,10 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
mc_list->dmi_addr);
i++;
+ cnt--;
}
netif_addr_unlock_bh(dev);
- if (mc_list)
+ if (cnt)
return -EOVERFLOW;
return i;
@@ -536,31 +541,14 @@ static int lbs_thread(void *data)
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
- if (++priv->nr_retries > 3) {
- lbs_pr_info("Excessive timeouts submitting "
- "command 0x%04x\n",
- le16_to_cpu(cmdnode->cmdbuf->command));
- lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- priv->nr_retries = 0;
- if (priv->reset_card)
- priv->reset_card(priv);
- } else {
- priv->cur_cmd = NULL;
- priv->dnld_sent = DNLD_RES_RECEIVED;
- lbs_pr_info("requeueing command 0x%04x due "
- "to timeout (#%d)\n",
- le16_to_cpu(cmdnode->cmdbuf->command),
- priv->nr_retries);
-
- /* Stick it back at the _top_ of the pending queue
- for immediate resubmission */
- list_add(&cmdnode->list, &priv->cmdpendingq);
- }
+ lbs_pr_info("Timeout submitting command 0x%04x\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+ lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
+ if (priv->reset_card)
+ priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
-
-
if (!priv->fw_ready)
continue;
@@ -622,7 +610,7 @@ static int lbs_thread(void *data)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
if (priv->mesh_dev &&
- priv->mesh_connect_status == LBS_CONNECTED)
+ lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
}
@@ -732,7 +720,7 @@ done:
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
-static void command_timer_fn(unsigned long data)
+static void lbs_cmd_timeout_handler(unsigned long data)
{
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
@@ -809,18 +797,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
return 0;
}
-static void lbs_sync_channel_worker(struct work_struct *work)
-{
- struct lbs_private *priv = container_of(work, struct lbs_private,
- sync_channel);
-
- lbs_deb_enter(LBS_DEB_MAIN);
- if (lbs_update_channel(priv))
- lbs_pr_info("Channel synchronization failed.");
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-
static int lbs_init_adapter(struct lbs_private *priv)
{
size_t bufsize;
@@ -848,14 +824,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
memset(priv->current_addr, 0xff, ETH_ALEN);
priv->connect_status = LBS_DISCONNECTED;
- priv->mesh_connect_status = LBS_DISCONNECTED;
priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
priv->mode = IW_MODE_INFRA;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
priv->radio_on = 1;
priv->enablehwauto = 1;
- priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
@@ -865,7 +839,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
mutex_init(&priv->lock);
- setup_timer(&priv->command_timer, command_timer_fn,
+ setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
(unsigned long)priv);
setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
(unsigned long)priv);
@@ -998,11 +972,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
- INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
-
- priv->mesh_open = 0;
- sprintf(priv->mesh_ssid, "mesh");
- priv->mesh_ssid_len = 4;
priv->wol_criteria = 0xffffffff;
priv->wol_gpio = 0xff;
@@ -1076,6 +1045,17 @@ void lbs_remove_card(struct lbs_private *priv)
EXPORT_SYMBOL_GPL(lbs_remove_card);
+static int lbs_rtap_supported(struct lbs_private *priv)
+{
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
+ return 1;
+
+ /* newer firmware use a capability mask */
+ return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
+}
+
+
{
struct net_device *dev = priv->dev;
@@ -1095,12 +1075,14 @@ int lbs_start_card(struct lbs_private *priv)
lbs_update_channel(priv);
+ lbs_init_mesh(priv);
+
/*
* While rtap isn't related to mesh, only mesh-enabled
* firmware implements the rtap functionality via
* CMD_802_11_MONITOR_MODE.
*/
- if (lbs_init_mesh(priv)) {
+ if (lbs_rtap_supported(priv)) {
if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
lbs_pr_err("cannot register lbs_rtap attribute\n");
}
@@ -1134,7 +1116,9 @@ void lbs_stop_card(struct lbs_private *priv)
netif_carrier_off(dev);
lbs_debugfs_remove_one(priv);
- if (lbs_deinit_mesh(priv))
+ lbs_deinit_mesh(priv);
+
+ if (lbs_rtap_supported(priv))
device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
/* Delete the timeout of the currently processing command */
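lbs_add_mcast_addrs() above moves from walking dev->mc_list by hand to netdev_mc_count() plus netdev_for_each_mc_addr(), using a running count to detect overflow instead of checking whether the list was exhausted. A reduced sketch of that iteration pattern for the kernel generation this patch targets (struct dev_mc_list/dmi_addr, as in the hunk; later kernels iterate struct netdev_hw_addr instead); example_copy_mcast is illustrative:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* copy up to max multicast addresses; return how many, or -EOVERFLOW */
static int example_copy_mcast(struct net_device *dev, u8 (*dst)[ETH_ALEN], int max)
{
	struct dev_mc_list *mc;
	int i = 0, left = netdev_mc_count(dev);

	netdev_for_each_mc_addr(mc, dev) {
		if (i == max)
			break;
		memcpy(dst[i++], mc->dmi_addr, ETH_ALEN);
		left--;
	}
	return left ? -EOVERFLOW : i;
}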
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 2f91c9b808af..e385af1f4583 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,7 +1,7 @@
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
@@ -196,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MESH);
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ priv->mesh_connect_status = LBS_DISCONNECTED;
+
+ /* Determine the mesh capability from fwrelease and fwcapinfo: */
+ /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
+ /* 5.110.22 has the mesh command with command id 0xa3 */
+ /* 10.0.0.p0 FW brings in the mesh config command with a different id */
+ /* Check the FW version MSB and pick the mesh TLV accordingly */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
/* Enable mesh, if supported, and work out which TLV it uses.
0x100 + 291 is an unofficial value used in 5.110.20.pXX
0x100 + 37 is the official value used in 5.110.21.pXX
@@ -218,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else
+ if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
/* 10.0.0.pXX new firmwares should succeed with TLV
* 0x100+37; Do not invoke command with old TLV.
*/
@@ -227,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
+
+
if (priv->mesh_tlv) {
+ sprintf(priv->mesh_ssid, "mesh");
+ priv->mesh_ssid_len = 4;
+
lbs_add_mesh(priv);
if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -351,8 +365,7 @@ int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
- sizeof(priv->dev->dev_addr));
+ memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
struct net_device *dev, struct rxpd *rxpd)
{
if (priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
if (rxpd->rx_control & RxPD_MESH_FRAME)
dev = priv->mesh_dev;
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
dev = priv->mesh_dev;
}
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
struct net_device *dev, struct txpd *txpd)
{
if (dev == priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD)
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
- else if (priv->mesh_fw_ver == MESH_FW_NEW)
+ else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
txpd->u.bss.bss_num = MESH_IFACE_ID;
}
}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
* Command id is 0xac for v10 FW along with mesh interface
* id in bits 14-13-12.
*/
- if (priv->mesh_fw_ver == MESH_FW_NEW)
+ if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
command = CMD_MESH_CONFIG |
(MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
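As the comment in the hunk above notes, v10 firmware expects the mesh interface id in bits 14..12 of the 0xac mesh-config command word. A small sketch of that composition; the bit offset (12) and interface id (1) used here are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define CMD_MESH_CONFIG      0x00ac   /* v10 command id per the comment above */
#define IFACE_BIT_OFFSET     12       /* assumed from "bits 14-13-12" */
#define IFACE_ID             1        /* placeholder interface id */

int main(void)
{
	uint16_t command = CMD_MESH_CONFIG | (IFACE_ID << IFACE_BIT_OFFSET);

	printf("mesh config command word: 0x%04x\n", command);   /* 0x10ac */
	return 0;
}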
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005fc..e2573303a328 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
#include <net/lib80211.h>
+#ifdef CONFIG_LIBERTAS_MESH
+
/* Mesh statistics */
struct lbs_mesh_stats {
u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
/* Command handling */
struct cmd_ds_command;
+struct cmd_ds_mesh_access;
+struct cmd_ds_mesh_config;
int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+ struct cmd_ds_mesh_access *cmd);
+int lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type);
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s);
+/* Accessors */
+
+#define lbs_mesh_open(priv) (priv->mesh_open)
+#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
+
+#else
+
+#define lbs_init_mesh(priv)
+#define lbs_deinit_mesh(priv)
+#define lbs_add_mesh(priv)
+#define lbs_remove_mesh(priv)
+#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
+#define lbs_mesh_set_txpd(priv, dev, txpd)
+#define lbs_mesh_config(priv, enable, chan)
+#define lbs_mesh_open(priv) (0)
+#define lbs_mesh_connected(priv) (0)
+
+#endif
+
+
+
#endif
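With CONFIG_LIBERTAS_MESH disabled, the #else branch above turns every mesh entry point into a no-op macro or a constant, so callers elsewhere in the driver build without #ifdefs of their own. A stripped-down sketch of that pattern; the feature flag and function names below are illustrative, not the driver's:

#include <stdio.h>

/* Flip this to 1 to compile the real implementation instead of the stubs. */
#define FEATURE_MESH 0

#if FEATURE_MESH
static int mesh_connected(int connect_status) { return connect_status == 1; }
static void mesh_set_txpd(int frame) { (void)frame; /* real work would go here */ }
#else
#define mesh_connected(connect_status)  (0)   /* compiled out: never connected */
#define mesh_set_txpd(frame)                  /* compiled out: no-op statement */
#endif

int main(void)
{
	mesh_set_txpd(42);
	printf("mesh connected: %d\n", mesh_connected(1));
	return 0;
}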
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 2daf8ffdb7e1..784dae714705 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -2,6 +2,7 @@
* This file contains the handling of RX in wlan driver.
*/
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "host.h"
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index c6a6c042b82f..24cd54b3a806 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -4,6 +4,7 @@
 * IOCTL handlers as well as command preparation and response routines
* for sending scan commands to the firmware.
*/
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
@@ -567,11 +568,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
chan_count = lbs_scan_create_channel_list(priv, chan_list);
netif_stop_queue(priv->dev);
- netif_carrier_off(priv->dev);
- if (priv->mesh_dev) {
+ if (priv->mesh_dev)
netif_stop_queue(priv->mesh_dev);
- netif_carrier_off(priv->mesh_dev);
- }
/* Prepare to continue an interrupted scan */
lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +633,13 @@ out2:
priv->scan_channel = 0;
out:
- if (priv->connect_status == LBS_CONNECTED) {
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
- }
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
+ if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ if (priv->mesh_dev && lbs_mesh_connected(priv) &&
+ !priv->tx_pending_len)
+ netif_wake_queue(priv->mesh_dev);
+
kfree(chan_list);
lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286ca..52d244ea3d97 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED))
+ if (priv->mesh_dev && lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index a8eb9e1fcf36..9b555884b08a 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -2,6 +2,7 @@
* This file contains ioctl functions
*/
#include <linux/ctype.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/if.h>
#include <linux/if_arp.h>
@@ -192,7 +193,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
lbs_deb_enter(LBS_DEB_WEXT);
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
memcpy(rates, lbs_bg_rates, MAX_RATES);
else
memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +299,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
@@ -307,7 +309,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
/* Use nickname to indicate that mesh is on */
- if (priv->mesh_connect_status == LBS_CONNECTED) {
+ if (lbs_mesh_connected(priv)) {
strncpy(extra, "Mesh", 12);
extra[12] = '\0';
dwrq->length = strlen(extra);
@@ -321,6 +323,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -422,6 +425,7 @@ static int lbs_get_mode(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_wlan_get_mode(struct net_device *dev,
struct iw_request_info *info, u32 * uwrq,
char *extra)
@@ -433,6 +437,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_get_txpow(struct net_device *dev,
struct iw_request_info *info,
@@ -863,7 +868,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
/* If we're not associated, all quality values are meaningless */
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
goto out;
/* Quality by RSSI */
@@ -1010,6 +1015,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_set_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1067,7 @@ out:
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -2025,10 +2032,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
if (priv->connect_status == LBS_CONNECTED) {
memcpy(extra, priv->curbssparams.ssid,
priv->curbssparams.ssid_len);
- extra[priv->curbssparams.ssid_len] = '\0';
} else {
memset(extra, 0, 32);
- extra[priv->curbssparams.ssid_len] = '\0';
}
/*
* If none, we may want to get the one that was set
@@ -2110,6 +2115,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_get_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
@@ -2163,6 +2169,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
/**
* @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2269,7 +2276,13 @@ static const iw_handler lbs_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
+struct iw_handler_def lbs_handler_def = {
+ .num_standard = ARRAY_SIZE(lbs_handler),
+ .standard = (iw_handler *) lbs_handler,
+ .get_wireless_stats = lbs_get_wireless_stats,
+};
+#ifdef CONFIG_LIBERTAS_MESH
static const iw_handler mesh_wlan_handler[] = {
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2327,14 +2340,10 @@ static const iw_handler mesh_wlan_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
-struct iw_handler_def lbs_handler_def = {
- .num_standard = ARRAY_SIZE(lbs_handler),
- .standard = (iw_handler *) lbs_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
struct iw_handler_def mesh_handler_def = {
.num_standard = ARRAY_SIZE(mesh_wlan_handler),
.standard = (iw_handler *) mesh_wlan_handler,
.get_wireless_stats = lbs_get_wireless_stats,
};
+#endif
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 28790e03dc43..b620daf59ef7 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#include <linux/slab.h>
+
#include "libertas_tf.h"
static const struct channel_range channel_ranges[] = {
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 3691c307e674..8cc9db60c14b 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -11,6 +11,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#define DRV_NAME "lbtf_usb"
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 019431d2f8a9..7945ff5aa334 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,6 +7,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#include <linux/slab.h>
+
#include "libertas_tf.h"
#include "linux/etherdevice.h"
@@ -318,14 +320,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
if (priv->vif != NULL)
return -EOPNOTSUPP;
- priv->vif = conf->vif;
- switch (conf->type) {
+ priv->vif = vif;
+ switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +339,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
priv->vif = NULL;
return -EOPNOTSUPP;
}
- lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
+ lbtf_set_mac_address(priv, (u8 *) vif->addr);
return 0;
}
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
@@ -495,7 +497,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
stats.noise = prxpd->nf;
- stats.qual = prxpd->snr - prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
@@ -556,6 +557,9 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
SET_IEEE80211_DEV(hw, dmdev);
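lbtf_add_card() above now publishes the supported interface types as a wiphy bitmask, which mac80211 consults before handing a vif to lbtf_op_add_interface(). A tiny sketch of the underlying bit test; the enum values below are illustrative, not the real NL80211_IFTYPE_* numbers:

#include <stdio.h>

enum iftype { IFTYPE_ADHOC = 1, IFTYPE_STATION, IFTYPE_AP, IFTYPE_MESH_POINT };

#define BIT(n) (1u << (n))

static int add_interface(unsigned int supported_modes, enum iftype type)
{
	/* Reject interface types that were not advertised in the mask. */
	if (!(supported_modes & BIT(type)))
		return -1;   /* stands in for -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	unsigned int modes = BIT(IFTYPE_STATION) | BIT(IFTYPE_ADHOC);

	printf("station: %d\n", add_interface(modes, IFTYPE_STATION)); /*  0 */
	printf("ap:      %d\n", add_interface(modes, IFTYPE_AP));      /* -1 */
	return 0;
}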
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7fd..7cd5f56662fc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -14,6 +14,7 @@
*/
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
@@ -32,6 +33,10 @@ static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
+static bool fake_hw_scan;
+module_param(fake_hw_scan, bool, 0444);
+MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -281,6 +286,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct mac_address addresses[2];
+
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
@@ -436,6 +443,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
}
+struct mac80211_hwsim_addr_match_data {
+ bool ret;
+ const u8 *addr;
+};
+
+static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_addr_match_data *md = data;
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ md->ret = true;
+}
+
+
+static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
+ const u8 *addr)
+{
+ struct mac80211_hwsim_addr_match_data md;
+
+ if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
+ return true;
+
+ md.ret = false;
+ md.addr = addr;
+ ieee80211_iterate_active_interfaces_atomic(data->hw,
+ mac80211_hwsim_addr_iter,
+ &md);
+
+ return md.ret;
+}
+
+
static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -488,8 +527,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
if (nskb == NULL)
continue;
- if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
- ETH_ALEN) == 0)
+ if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -553,24 +591,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_set_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_set_magic(vif);
return 0;
}
static void mac80211_hwsim_remove_interface(
- struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf)
+ struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_check_magic(conf->vif);
- hwsim_clear_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_check_magic(vif);
+ hwsim_clear_magic(vif);
}
@@ -618,12 +656,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
-
- printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
+ static const char *chantypes[4] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+ };
+ static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+ };
+
+ printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
+ chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS));
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
@@ -720,23 +772,41 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ hwsim_check_magic(vif);
+ hwsim_set_sta_magic(sta);
+
+ return 0;
+}
+
+static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ hwsim_check_magic(vif);
+ hwsim_clear_sta_magic(sta);
+
+ return 0;
+}
+
static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
hwsim_check_magic(vif);
+
switch (cmd) {
- case STA_NOTIFY_ADD:
- hwsim_set_sta_magic(sta);
- break;
- case STA_NOTIFY_REMOVE:
- hwsim_clear_sta_magic(sta);
- break;
case STA_NOTIFY_SLEEP:
case STA_NOTIFY_AWAKE:
/* TODO: make good use of these flags */
break;
+ default:
+ WARN(1, "Invalid sta notify: %d\n", cmd);
+ break;
}
}
@@ -827,7 +897,77 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-static const struct ieee80211_ops mac80211_hwsim_ops =
+static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
+{
+ /*
+ * In this special case, there's nothing we need to
+ * do because hwsim does transmission synchronously.
+ * In the future, when it does transmissions via
+ * userspace, we may need to do something.
+ */
+}
+
+struct hw_scan_done {
+ struct delayed_work w;
+ struct ieee80211_hw *hw;
+};
+
+static void hw_scan_done(struct work_struct *work)
+{
+ struct hw_scan_done *hsd =
+ container_of(work, struct hw_scan_done, w.work);
+
+ ieee80211_scan_completed(hsd->hw, false);
+ kfree(hsd);
+}
+
+static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
+{
+ struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
+ int i;
+
+ if (!hsd)
+ return -ENOMEM;
+
+ hsd->hw = hw;
+ INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+
+ printk(KERN_DEBUG "hwsim scan request\n");
+ for (i = 0; i < req->n_channels; i++)
+ printk(KERN_DEBUG "hwsim scan freq %d\n",
+ req->channels[i]->center_freq);
+
+ ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -837,10 +977,14 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
.config = mac80211_hwsim_config,
.configure_filter = mac80211_hwsim_configure_filter,
.bss_info_changed = mac80211_hwsim_bss_info_changed,
+ .sta_add = mac80211_hwsim_sta_add,
+ .sta_remove = mac80211_hwsim_sta_remove,
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
+ .ampdu_action = mac80211_hwsim_ampdu_action,
+ .flush = mac80211_hwsim_flush,
};
@@ -1035,6 +1179,9 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
+ if (fake_hw_scan)
+ mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1072,7 +1219,11 @@ static int __init init_mac80211_hwsim(void)
SET_IEEE80211_DEV(hw, data->dev);
addr[3] = i >> 8;
addr[4] = i;
- SET_IEEE80211_PERM_ADDR(hw, addr);
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
hw->channel_change_time = 1;
hw->queues = 4;
@@ -1082,7 +1233,9 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_MESH_POINT);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM;
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
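Instead of matching received frames against the radio's permanent address only, the hwsim code above now also walks every active interface address through an iterator callback. A userspace sketch of the same match, with the iterator replaced by a plain loop over a caller-supplied address list (names below are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct addr_match_data {
	bool ret;
	const unsigned char *addr;
};

/* Callback shape mirroring the per-interface iterator used above. */
static void addr_iter(void *data, const unsigned char *mac)
{
	struct addr_match_data *md = data;

	if (memcmp(mac, md->addr, ETH_ALEN) == 0)
		md->ret = true;
}

static bool addr_match(const unsigned char *perm_addr,
		       unsigned char vif_addrs[][ETH_ALEN],
		       int n_vifs, const unsigned char *addr1)
{
	struct addr_match_data md = { .ret = false, .addr = addr1 };
	int i;

	if (memcmp(addr1, perm_addr, ETH_ALEN) == 0)
		return true;

	for (i = 0; i < n_vifs; i++)
		addr_iter(&md, vif_addrs[i]);

	return md.ret;
}

int main(void)
{
	unsigned char perm[ETH_ALEN]    = { 0x02, 0x00, 0x00, 0x00, 0x03, 0x00 };
	unsigned char vifs[1][ETH_ALEN] = { { 0x42, 0x00, 0x00, 0x00, 0x03, 0x00 } };

	printf("vif addr matches:  %d\n", addr_match(perm, vifs, 1, vifs[0]));
	printf("perm addr matches: %d\n", addr_match(perm, vifs, 1, perm));
	return 0;
}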
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59d49159cf2a..12fdcb25fd38 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
- * Copyright (C) 2008-2009 Marvell Semiconductor Inc.
+ * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -26,7 +27,7 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.10"
+#define MWL8K_VERSION "0.12"
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +93,7 @@ struct mwl8k_device_info {
char *part_name;
char *helper_image;
char *fw_image;
- struct rxd_ops *rxd_ops;
- u16 modes;
+ struct rxd_ops *ap_rxd_ops;
};
struct mwl8k_rx_queue {
@@ -120,34 +120,36 @@ struct mwl8k_tx_queue {
/* sw appends here */
int tail;
- struct ieee80211_tx_queue_stats stats;
+ unsigned int len;
struct mwl8k_tx_desc *txd;
dma_addr_t txd_dma;
struct sk_buff **skb;
};
-/* Pointers to the firmware data and meta information about it. */
-struct mwl8k_firmware {
- /* Boot helper code */
- struct firmware *helper;
+struct mwl8k_priv {
+ struct ieee80211_hw *hw;
+ struct pci_dev *pdev;
- /* Microcode */
- struct firmware *ucode;
-};
+ struct mwl8k_device_info *device_info;
-struct mwl8k_priv {
void __iomem *sram;
void __iomem *regs;
- struct ieee80211_hw *hw;
- struct pci_dev *pdev;
+ /* firmware */
+ struct firmware *fw_helper;
+ struct firmware *fw_ucode;
- struct mwl8k_device_info *device_info;
+ /* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
-
- /* firmware files and meta data */
- struct mwl8k_firmware fw;
+ struct ieee80211_supported_band band_24;
+ struct ieee80211_channel channels_24[14];
+ struct ieee80211_rate rates_24[14];
+ struct ieee80211_supported_band band_50;
+ struct ieee80211_channel channels_50[4];
+ struct ieee80211_rate rates_50[9];
+ u32 ap_macids_supported;
+ u32 sta_macids_supported;
/* firmware access */
struct mutex fw_mutex;
@@ -161,9 +163,9 @@ struct mwl8k_priv {
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
- struct ieee80211_vif *vif;
-
- struct ieee80211_channel *current_channel;
+ /* List of interfaces. */
+ u32 macids_used;
+ struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
@@ -182,11 +184,6 @@ struct mwl8k_priv {
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
- /* PHY parameters */
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[14];
-
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
@@ -205,32 +202,33 @@ struct mwl8k_priv {
*/
struct work_struct finalize_join_worker;
- /* Tasklet to reclaim TX descriptors and buffers after tx */
- struct tasklet_struct tx_reclaim_task;
+ /* Tasklet to perform TX reclaim. */
+ struct tasklet_struct poll_tx_task;
+
+ /* Tasklet to perform RX. */
+ struct tasklet_struct poll_rx_task;
};
/* Per interface specific private data */
struct mwl8k_vif {
- /* backpointer to parent config block */
- struct mwl8k_priv *priv;
-
- /* BSS config of AP or IBSS from mac80211*/
- struct ieee80211_bss_conf bss_info;
-
- /* BSSID of AP or IBSS */
- u8 bssid[ETH_ALEN];
- u8 mac_addr[ETH_ALEN];
+ struct list_head list;
+ struct ieee80211_vif *vif;
- /* Index into station database.Returned by update_sta_db call */
- u8 peer_id;
+ /* Firmware macid for this vif. */
+ int macid;
- /* Non AMPDU sequence number assigned by driver */
- u16 seqno;
+ /* Non AMPDU sequence number assigned by driver. */
+ u16 seqno;
};
-
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
-static const struct ieee80211_channel mwl8k_channels[] = {
+struct mwl8k_sta {
+ /* Index into station database. Returned by UPDATE_STADB. */
+ u8 peer_id;
+};
+#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
+
+static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +240,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
+ { .center_freq = 2467, .hw_value = 12, },
+ { .center_freq = 2472, .hw_value = 13, },
+ { .center_freq = 2484, .hw_value = 14, },
};
-static const struct ieee80211_rate mwl8k_rates[] = {
+static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
@@ -261,8 +262,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 720, .hw_value = 144, },
};
-static const u8 mwl8k_rateids[12] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
+static const struct ieee80211_channel mwl8k_channels_50[] = {
+ { .center_freq = 5180, .hw_value = 36, },
+ { .center_freq = 5200, .hw_value = 40, },
+ { .center_freq = 5220, .hw_value = 44, },
+ { .center_freq = 5240, .hw_value = 48, },
+};
+
+static const struct ieee80211_rate mwl8k_rates_50[] = {
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
@@ -278,6 +294,7 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_RF_ANTENNA 0x0020
+#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +308,10 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
-#define MWL8K_CMD_SET_MAC_ADDR 0x0202
+#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
+#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
+#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +329,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
+ MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +345,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
+ MWL8K_CMDNAME(BSS_START);
+ MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +377,8 @@ static void mwl8k_release_fw(struct firmware **fw)
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
- mwl8k_release_fw(&priv->fw.ucode);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_ucode);
+ mwl8k_release_fw(&priv->fw_helper);
}
/* Request fw image */
@@ -377,7 +399,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
int rc;
if (di->helper_image != NULL) {
- rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
+ rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
if (rc) {
printk(KERN_ERR "%s: Error requesting helper "
"firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +408,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
}
}
- rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
+ rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
pci_name(priv->pdev), di->fw_image);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_helper);
return rc;
}
return 0;
}
-MODULE_FIRMWARE("mwl8k/helper_8687.fw");
-MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
-
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
- __le16 seq_num;
+ __u8 seq_num;
+ __u8 macid;
__le16 result;
char payload[0];
} __attribute__((packed));
@@ -461,6 +481,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
+ cmd->macid = 0;
cmd->result = 0;
done = 0;
@@ -551,13 +572,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
- struct firmware *fw = priv->fw.ucode;
- struct mwl8k_device_info *di = priv->device_info;
+ struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
- struct firmware *helper = priv->fw.helper;
+ struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +604,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
return rc;
}
- if (di->modes & BIT(NL80211_IFTYPE_AP))
- iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
- else
- iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
+ iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
loops = 500000;
do {
@@ -610,91 +627,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
}
-/*
- * Defines shared between transmission and reception.
- */
-/* HT control fields for firmware */
-struct ewc_ht_info {
- __le16 control1;
- __le16 control2;
- __le16 control3;
-} __attribute__((packed));
-
-/* Firmware Station database operations */
-#define MWL8K_STA_DB_ADD_ENTRY 0
-#define MWL8K_STA_DB_MODIFY_ENTRY 1
-#define MWL8K_STA_DB_DEL_ENTRY 2
-#define MWL8K_STA_DB_FLUSH 3
-
-/* Peer Entry flags - used to define the type of the peer node */
-#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-
-struct peer_capability_info {
- /* Peer type - AP vs. STA. */
- __u8 peer_type;
-
- /* Basic 802.11 capabilities from assoc resp. */
- __le16 basic_caps;
-
- /* Set if peer supports 802.11n high throughput (HT). */
- __u8 ht_support;
-
- /* Valid if HT is supported. */
- __le16 ht_caps;
- __u8 extended_ht_caps;
- struct ewc_ht_info ewc_info;
-
- /* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[12];
-
- /* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[16];
- __u8 pad[16];
-
- /* If set, interoperability mode, no proprietary extensions. */
- __u8 interop;
- __u8 pad2;
- __u8 station_id;
- __le16 amsdu_enabled;
-} __attribute__((packed));
-
-/* Inline functions to manipulate QoS field in data descriptor. */
-static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
-{
- u16 val_mask = 1 << 4;
-
- /* End of Service Period Bit 4 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
-{
- u16 val_mask = 0x3;
- u8 shift = 5;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Ack Policy Bit 5-6 */
- return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
-}
-
-static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
-{
- u16 val_mask = 1 << 7;
-
- /* AMSDU present Bit 7 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
-{
- u16 val_mask = 0xff;
- u8 shift = 8;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Queue Length Bits 8-15 */
- return (qos & qos_mask) | ((len & val_mask) << shift);
-}
-
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
@@ -761,9 +693,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
/*
- * Packet reception for 88w8366.
+ * Packet reception for 88w8366 AP firmware.
*/
-struct mwl8k_rxd_8366 {
+struct mwl8k_rxd_8366_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
@@ -781,23 +713,23 @@ struct mwl8k_rxd_8366 {
__u8 rx_ctrl;
} __attribute__((packed));
-#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80
-#define MWL8K_8366_RATE_INFO_40MHZ 0x40
-#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f)
+#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
+#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
-#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
+#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
-static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +738,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
- __le16 *qos)
+mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
- if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -820,23 +752,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_floor;
- if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) {
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ)
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate);
+ status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
- for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
- if (mwl8k_rates[i].hw_value == rxd->rate) {
+ for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
+ if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -844,17 +782,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8366_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8366),
- .rxd_init = mwl8k_rxd_8366_init,
- .rxd_refill = mwl8k_rxd_8366_refill,
- .rxd_process = mwl8k_rxd_8366_process,
+static struct rxd_ops rxd_8366_ap_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
+ .rxd_init = mwl8k_rxd_8366_ap_init,
+ .rxd_refill = mwl8k_rxd_8366_ap_refill,
+ .rxd_process = mwl8k_rxd_8366_ap_process,
};
/*
- * Packet reception for 88w8687.
+ * Packet reception for STA firmware.
*/
-struct mwl8k_rxd_8687 {
+struct mwl8k_rxd_sta {
__le16 pkt_len;
__u8 link_quality;
__u8 noise_level;
@@ -871,26 +809,26 @@ struct mwl8k_rxd_8687 {
__u8 pad2[2];
} __attribute__((packed));
-#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
-#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
-#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
-#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
-#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
-#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
+#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
+#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
+#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
+#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
+#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
+#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
-#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
-static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +837,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
+mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
u16 rate_info;
- if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -915,19 +853,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_level;
- status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
- status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
+ status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
+ status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
status->flag |= RX_FLAG_SHORTPRE;
- if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
+ if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
status->flag |= RX_FLAG_SHORT_GI;
- if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
+ if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -935,11 +879,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8687_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8687),
- .rxd_init = mwl8k_rxd_8687_init,
- .rxd_refill = mwl8k_rxd_8687_refill,
- .rxd_process = mwl8k_rxd_8687_process,
+static struct rxd_ops rxd_sta_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_sta),
+ .rxd_init = mwl8k_rxd_sta_init,
+ .rxd_refill = mwl8k_rxd_sta_refill,
+ .rxd_process = mwl8k_rxd_sta_process,
};
@@ -1153,16 +1097,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
* Packet transmission.
*/
-/* Transmit packet ACK policy */
-#define MWL8K_TXD_ACK_POLICY_NORMAL 0
-#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
-
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
+#define MWL8K_QOS_QLEN_UNSPEC 0xff00
+#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
+#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
+#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
+#define MWL8K_QOS_EOSP 0x0010
+
struct mwl8k_tx_desc {
__le32 status;
__u8 data_rate;
@@ -1187,8 +1133,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
int size;
int i;
- memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
- txq->stats.limit = MWL8K_TX_DESCS;
+ txq->len = 0;
txq->head = 0;
txq->tail = 0;
@@ -1264,7 +1209,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
"fw_owned=%d drv_owned=%d unused=%d\n",
wiphy_name(hw->wiphy), i,
- txq->stats.len, txq->head, txq->tail,
+ txq->len, txq->head, txq->tail,
fw_owned, drv_owned, unused);
}
}
@@ -1272,7 +1217,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
-#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
@@ -1316,8 +1261,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
}
if (priv->pending_tx_pkts < oldcount) {
- printk(KERN_NOTICE "%s: timeout waiting for tx "
- "rings to drain (%d -> %d pkts), retrying\n",
+ printk(KERN_NOTICE "%s: waiting for tx rings "
+ "to drain (%d -> %d pkts)\n",
wiphy_name(hw->wiphy), oldcount,
priv->pending_tx_pkts);
retry = 1;
@@ -1342,13 +1287,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
-static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
+static int
+mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- int wake = 0;
+ int processed;
- while (txq->stats.len > 0) {
+ processed = 0;
+ while (txq->len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1370,8 +1317,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
}
txq->head = (tx + 1) % MWL8K_TX_DESCS;
- BUG_ON(txq->stats.len == 0);
- txq->stats.len--;
+ BUG_ON(txq->len == 0);
+ txq->len--;
priv->pending_tx_pkts--;
addr = le32_to_cpu(tx_desc->pkt_phys_addr);
@@ -1395,11 +1342,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
ieee80211_tx_status_irqsafe(hw, skb);
- wake = 1;
+ processed++;
}
- if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+ if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
ieee80211_wake_queue(hw, index);
+
+ return processed;
}
/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1357,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- mwl8k_txq_reclaim(hw, index, 1);
+ mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
@@ -1446,11 +1395,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- u16 seqno = mwl8k_vif->seqno;
-
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- wh->seq_ctrl |= cpu_to_le16(seqno << 4);
- mwl8k_vif->seqno = seqno++ % 4096;
+ wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
+ mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1406,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
if (ieee80211_is_mgmt(wh->frame_control) ||
ieee80211_is_ctl(wh->frame_control)) {
txdatarate = 0;
- qos = mwl8k_qos_setbit_eosp(qos);
- /* Set Queue size to unspecified */
- qos = mwl8k_qos_setbit_qlen(qos, 0xff);
+ qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
} else if (ieee80211_is_data(wh->frame_control)) {
txdatarate = 1;
if (is_multicast_ether_addr(wh->addr1))
txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
- /* Send pkt in an aggregate if AMPDU frame. */
+ qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_BLOCKACK);
+ qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
else
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_NORMAL);
-
- if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- qos = mwl8k_qos_setbit_amsdu(qos);
+ qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
dma = pci_map_single(priv->pdev, skb->data,
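The hunk above drops the mwl8k_qos_setbit_*() helpers in favour of direct mask operations on the QoS word, using the MWL8K_QOS_* values defined earlier in this patch. A worked standalone example of the same assembly (the sample input values are arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QOS_QLEN_UNSPEC         0xff00
#define QOS_ACK_POLICY_MASK     0x0060
#define QOS_ACK_POLICY_NORMAL   0x0000
#define QOS_ACK_POLICY_BLOCKACK 0x0060
#define QOS_EOSP                0x0010

static uint16_t build_qos(uint16_t qos, bool is_mgmt_or_ctl, bool is_ampdu)
{
	if (is_mgmt_or_ctl) {
		/* Management/control: queue length unspecified, end of SP. */
		qos |= QOS_QLEN_UNSPEC | QOS_EOSP;
	} else {
		/* Data: clear the two ack-policy bits, then pick a policy. */
		qos &= ~QOS_ACK_POLICY_MASK;
		qos |= is_ampdu ? QOS_ACK_POLICY_BLOCKACK : QOS_ACK_POLICY_NORMAL;
	}
	return qos;
}

int main(void)
{
	printf("mgmt:         0x%04x\n", build_qos(0x0000, true, false));  /* 0xff10 */
	printf("data, normal: 0x%04x\n", build_qos(0x0065, false, false)); /* 0x0005 */
	printf("data, ampdu:  0x%04x\n", build_qos(0x0005, false, true));  /* 0x0065 */
	return 0;
}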
@@ -1503,12 +1443,14 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- tx->peer_id = mwl8k_vif->peer_id;
+ if (!priv->ap_fw && tx_info->control.sta != NULL)
+ tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ else
+ tx->peer_id = 0;
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
- txq->stats.count++;
- txq->stats.len++;
+ txq->len++;
priv->pending_tx_pkts++;
txq->tail++;
@@ -1656,6 +1598,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
return rc;
}
+static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct mwl8k_cmd_pkt *cmd)
+{
+ if (vif != NULL)
+ cmd->macid = MWL8K_VIF(vif)->macid;
+ return mwl8k_post_cmd(hw, cmd);
+}
+
+/*
+ * Setup code shared between STA and AP firmware images.
+ */
+static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
+ memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
+
+ BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
+ memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
+
+ priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.channels = priv->channels_24;
+ priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
+ priv->band_24.bitrates = priv->rates_24;
+ priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+}
+
+static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
+ memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
+
+ BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
+ memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
+
+ priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.channels = priv->channels_50;
+ priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
+ priv->band_50.bitrates = priv->rates_50;
+ priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+}
+
/*
* CMD_GET_HW_SPEC (STA version).
*/
@@ -1678,6 +1670,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 total_rxd;
} __attribute__((packed));
+#define MWL8K_CAP_MAX_AMSDU 0x20000000
+#define MWL8K_CAP_GREENFIELD 0x08000000
+#define MWL8K_CAP_AMPDU 0x04000000
+#define MWL8K_CAP_RX_STBC 0x01000000
+#define MWL8K_CAP_TX_STBC 0x00800000
+#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
+#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
+#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
+#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
+#define MWL8K_CAP_DELAY_BA 0x00003000
+#define MWL8K_CAP_MIMO 0x00000200
+#define MWL8K_CAP_40MHZ 0x00000100
+#define MWL8K_CAP_BAND_MASK 0x00000007
+#define MWL8K_CAP_5GHZ 0x00000004
+#define MWL8K_CAP_2GHZ4 0x00000001
+
+static void
+mwl8k_set_ht_caps(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *band, u32 cap)
+{
+ int rx_streams;
+ int tx_streams;
+
+ band->ht_cap.ht_supported = 1;
+
+ if (cap & MWL8K_CAP_MAX_AMSDU)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+ if (cap & MWL8K_CAP_GREENFIELD)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
+ if (cap & MWL8K_CAP_AMPDU) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ }
+ if (cap & MWL8K_CAP_RX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
+ if (cap & MWL8K_CAP_TX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+ if (cap & MWL8K_CAP_SHORTGI_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ if (cap & MWL8K_CAP_SHORTGI_20MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ if (cap & MWL8K_CAP_DELAY_BA)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
+ if (cap & MWL8K_CAP_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
+ tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
+
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ band->ht_cap.mcs.rx_mask[1] = 0xff;
+ if (rx_streams >= 3)
+ band->ht_cap.mcs.rx_mask[2] = 0xff;
+ band->ht_cap.mcs.rx_mask[4] = 0x01;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ if (rx_streams != tx_streams) {
+ band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
+}
+
+static void
+mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
+ mwl8k_setup_2ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_24, caps);
+ }
+
+ if (caps & MWL8K_CAP_5GHZ) {
+ mwl8k_setup_5ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_50, caps);
+ }
+}
+
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1708,6 +1783,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
+ priv->ap_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
@@ -1761,6 +1839,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_setup_2ghz_band(hw);
+ priv->ap_macids_supported = 0x000000ff;
+ priv->sta_macids_supported = 0x00000000;
off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1887,9 @@ struct mwl8k_cmd_set_hw_spec {
__le32 total_rxd;
} __attribute__((packed));
-#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
@@ -1827,7 +1910,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
- cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+ cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -1897,9 +1982,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
}
/*
- * CMD_802_11_GET_STAT.
+ * CMD_GET_STAT.
*/
-struct mwl8k_cmd_802_11_get_stat {
+struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
} __attribute__((packed));
@@ -1909,10 +1994,10 @@ struct mwl8k_cmd_802_11_get_stat {
#define MWL8K_STAT_FCS_ERROR 24
#define MWL8K_STAT_RTS_SUCCESS 11
-static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_cmd_802_11_get_stat *cmd;
+ struct mwl8k_cmd_get_stat *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2024,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
}
/*
- * CMD_802_11_RADIO_CONTROL.
+ * CMD_RADIO_CONTROL.
*/
-struct mwl8k_cmd_802_11_radio_control {
+struct mwl8k_cmd_radio_control {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 control;
@@ -1949,10 +2034,10 @@ struct mwl8k_cmd_802_11_radio_control {
} __attribute__((packed));
static int
-mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
+mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_802_11_radio_control *cmd;
+ struct mwl8k_cmd_radio_control *cmd;
int rc;
if (enable == priv->radio_on && !force)
@@ -1977,36 +2062,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
return rc;
}
-static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
+ return mwl8k_cmd_radio_control(hw, 0, 0);
}
-static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
+ return mwl8k_cmd_radio_control(hw, 1, 0);
}
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
- struct mwl8k_priv *priv;
-
- if (hw == NULL || hw->priv == NULL)
- return -EINVAL;
- priv = hw->priv;
+ struct mwl8k_priv *priv = hw->priv;
priv->radio_short_preamble = short_preamble;
- return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
+ return mwl8k_cmd_radio_control(hw, 1, 1);
}
/*
- * CMD_802_11_RF_TX_POWER.
+ * CMD_RF_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL 8
-struct mwl8k_cmd_802_11_rf_tx_power {
+struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 support_level;
@@ -2015,9 +2096,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __attribute__((packed));
-static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
+static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
- struct mwl8k_cmd_802_11_rf_tx_power *cmd;
+ struct mwl8k_cmd_rf_tx_power *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2150,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
}
/*
+ * CMD_SET_BEACON.
+ */
+struct mwl8k_cmd_set_beacon {
+ struct mwl8k_cmd_pkt header;
+ __le16 beacon_len;
+ __u8 beacon[0];
+};
+
+static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *beacon, int len)
+{
+ struct mwl8k_cmd_set_beacon *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
+ cmd->beacon_len = cpu_to_le16(len);
+ memcpy(cmd->beacon, beacon, len);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2214,7 @@ struct mwl8k_cmd_set_post_scan {
} __attribute__((packed));
static int
-mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
+mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
@@ -2134,8 +2245,9 @@ struct mwl8k_cmd_set_rf_channel {
} __attribute__((packed));
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel)
+ struct ieee80211_conf *conf)
{
+ struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
@@ -2147,10 +2259,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
+
if (channel->band == IEEE80211_BAND_2GHZ)
- cmd->channel_flags = cpu_to_le32(0x00000081);
- else
- cmd->channel_flags = cpu_to_le32(0x00000000);
+ cmd->channel_flags |= cpu_to_le32(0x00000001);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->channel_flags |= cpu_to_le32(0x00000004);
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2159,85 +2280,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_SLOT.
+ * CMD_SET_AID.
*/
-struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
- __le16 action;
- __u8 short_slot;
-} __attribute__((packed));
-
-static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
-{
- struct mwl8k_cmd_set_slot *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->short_slot = short_slot_time;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+#define MWL8K_FRAME_PROT_DISABLED 0x00
+#define MWL8K_FRAME_PROT_11G 0x07
+#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
+#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
- return rc;
-}
+struct mwl8k_cmd_update_set_aid {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
-/*
- * CMD_MIMO_CONFIG.
- */
-struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
- __le32 action;
- __u8 rx_antenna_map;
- __u8 tx_antenna_map;
+ /* AP's MAC address (BSSID) */
+ __u8 bssid[ETH_ALEN];
+ __le16 protection_mode;
+ __u8 supp_rates[14];
} __attribute__((packed));
-static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
- struct mwl8k_cmd_mimo_config *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
- cmd->rx_antenna_map = rx;
- cmd->tx_antenna_map = tx;
+ int i;
+ int j;
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+ /*
+ * Clear nonstandard rates 4 and 13.
+ */
+ mask &= 0x1fef;
- return rc;
+ for (i = 0, j = 0; i < 14; i++) {
+ if (mask & (1 << i))
+ rates[j++] = mwl8k_rates_24[i].hw_value;
+ }
}
-/*
- * CMD_ENABLE_SNIFFER.
- */
-struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
- __le32 action;
-} __attribute__((packed));
-
-static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
- struct mwl8k_cmd_enable_sniffer *cmd;
+ struct mwl8k_cmd_update_set_aid *cmd;
+ u16 prot_mode;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32(!!enable);
+ cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ if (vif->bss_conf.use_cts_prot) {
+ prot_mode = MWL8K_FRAME_PROT_11G;
+ } else {
+ switch (vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
+ break;
+ default:
+ prot_mode = MWL8K_FRAME_PROT_DISABLED;
+ break;
+ }
+ }
+ cmd->protection_mode = cpu_to_le16(prot_mode);
+
+ legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2246,37 +2357,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
/*
- * CMD_SET_MAC_ADDR.
+ * CMD_SET_RATE.
*/
-struct mwl8k_cmd_set_mac_addr {
- struct mwl8k_cmd_pkt header;
- union {
- struct {
- __le16 mac_type;
- __u8 mac_addr[ETH_ALEN];
- } mbss;
- __u8 mac_addr[ETH_ALEN];
- };
+struct mwl8k_cmd_set_rate {
+ struct mwl8k_cmd_pkt header;
+ __u8 legacy_rates[14];
+
+ /* Bitmap for supported MCS codes. */
+ __u8 mcs_set[16];
+ __u8 reserved[16];
} __attribute__((packed));
-static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+static int
+mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 legacy_rate_mask, u8 *mcs_rates)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_mac_addr *cmd;
+ struct mwl8k_cmd_set_rate *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- if (priv->ap_fw) {
- cmd->mbss.mac_type = 0;
- memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
- } else {
- memcpy(cmd->mac_addr, mac, ETH_ALEN);
- }
+ legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
+ memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2284,29 +2390,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
return rc;
}
-
/*
- * CMD_SET_RATEADAPT_MODE.
+ * CMD_FINALIZE_JOIN.
*/
-struct mwl8k_cmd_set_rate_adapt_mode {
+#define MWL8K_FJ_BEACON_MAXLEN 128
+
+struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
- __le16 action;
- __le16 mode;
+ __le32 sleep_interval; /* Number of beacon periods to sleep */
+ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __attribute__((packed));
-static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
+static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
+ int framelen, int dtim)
{
- struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ struct mwl8k_cmd_finalize_join *cmd;
+ struct ieee80211_mgmt *payload = frame;
+ int payload_len;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->mode = cpu_to_le16(mode);
+ cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
+
+ payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
+ if (payload_len < 0)
+ payload_len = 0;
+ else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
+ payload_len = MWL8K_FJ_BEACON_MAXLEN;
+
+ memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2315,59 +2432,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
}
/*
- * CMD_SET_WMM_MODE.
+ * CMD_SET_RTS_THRESHOLD.
*/
-struct mwl8k_cmd_set_wmm {
+struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
+ __le16 threshold;
} __attribute__((packed));
-static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_wmm *cmd;
+ struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(!!enable);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
- if (!rc)
- priv->wmm_enabled = enable;
-
return rc;
}
/*
- * CMD_SET_RTS_THRESHOLD.
+ * CMD_SET_SLOT.
*/
-struct mwl8k_cmd_rts_threshold {
+struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
- __le16 threshold;
+ __u8 short_slot;
} __attribute__((packed));
-static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
- u16 action, u16 threshold)
+static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
- struct mwl8k_cmd_rts_threshold *cmd;
+ struct mwl8k_cmd_set_slot *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(action);
- cmd->threshold = cpu_to_le16(threshold);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->short_slot = short_slot_time;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2426,9 +2541,9 @@ struct mwl8k_cmd_set_edca_params {
MWL8K_SET_EDCA_AIFS)
static int
-mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
- __u16 cw_min, __u16 cw_max,
- __u8 aifs, __u16 txop)
+mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
+ __u16 cw_min, __u16 cw_max,
+ __u8 aifs, __u16 txop)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2553,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
if (cmd == NULL)
return -ENOMEM;
- /*
- * Queues 0 (BE) and 1 (BK) are swapped in hardware for
- * this call.
- */
- qnum ^= !(qnum >> 1);
-
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2576,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
}
/*
- * CMD_FINALIZE_JOIN.
+ * CMD_SET_WMM_MODE.
*/
-#define MWL8K_FJ_BEACON_MAXLEN 128
-
-struct mwl8k_cmd_finalize_join {
+struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
- __le32 sleep_interval; /* Number of beacon periods to sleep */
- __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
+ __le16 action;
} __attribute__((packed));
-static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
- int framelen, int dtim)
+static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_cmd_finalize_join *cmd;
- struct ieee80211_mgmt *payload = frame;
- int payload_len;
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_cmd_set_wmm_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
-
- payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
- if (payload_len < 0)
- payload_len = 0;
- else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
- payload_len = MWL8K_FJ_BEACON_MAXLEN;
-
- memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
+ cmd->action = cpu_to_le16(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
+ if (!rc)
+ priv->wmm_enabled = enable;
+
return rc;
}
/*
- * CMD_UPDATE_STADB.
+ * CMD_MIMO_CONFIG.
*/
-struct mwl8k_cmd_update_sta_db {
+struct mwl8k_cmd_mimo_config {
struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __u8 rx_antenna_map;
+ __u8 tx_antenna_map;
+} __attribute__((packed));
- /* See STADB_ACTION_TYPE */
- __le32 action;
+static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+{
+ struct mwl8k_cmd_mimo_config *cmd;
+ int rc;
- /* Peer MAC address */
- __u8 peer_addr[ETH_ALEN];
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- __le32 reserved;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
+ cmd->rx_antenna_map = rx;
+ cmd->tx_antenna_map = tx;
- /* Peer info - valid during add/update. */
- struct peer_capability_info peer_info;
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_USE_FIXED_RATE (STA version).
+ */
+struct mwl8k_cmd_use_fixed_rate_sta {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[8];
+ __le32 rate_type;
+ __le32 reserved1;
+ __le32 reserved2;
} __attribute__((packed));
-static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, __u32 action)
+#define MWL8K_USE_AUTO_RATE 0x0002
+#define MWL8K_UCAST_RATE 0
+
+static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_sta_db *cmd;
- struct peer_capability_info *peer_info;
+ struct mwl8k_cmd_use_fixed_rate_sta *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
- cmd->action = cpu_to_le32(action);
- peer_info = &cmd->peer_info;
- memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- switch (action) {
- case MWL8K_STA_DB_ADD_ENTRY:
- case MWL8K_STA_DB_MODIFY_ENTRY:
- /* Build peer_info block */
- peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
- peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
- memcpy(peer_info->legacy_rates, mwl8k_rateids,
- sizeof(mwl8k_rateids));
- peer_info->interop = 1;
- peer_info->amsdu_enabled = 0;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = peer_info->station_id;
+ return rc;
+}
- break;
+/*
+ * CMD_USE_FIXED_RATE (AP version).
+ */
+struct mwl8k_cmd_use_fixed_rate_ap {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct mwl8k_rate_entry_ap {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[4];
+ u8 multicast_rate;
+ u8 multicast_rate_type;
+ u8 management_rate;
+} __attribute__((packed));
- case MWL8K_STA_DB_DEL_ENTRY:
- case MWL8K_STA_DB_FLUSH:
- default:
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = 0;
- break;
- }
+static int
+mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
+{
+ struct mwl8k_cmd_use_fixed_rate_ap *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->multicast_rate = mcast;
+ cmd->management_rate = mgmt;
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_SET_AID.
+ * CMD_ENABLE_SNIFFER.
*/
-#define MWL8K_FRAME_PROT_DISABLED 0x00
-#define MWL8K_FRAME_PROT_11G 0x07
-#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
-#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
-
-struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
- __le16 aid;
-
- /* AP's MAC address (BSSID) */
- __u8 bssid[ETH_ALEN];
- __le16 protection_mode;
- __u8 supp_rates[14];
+struct mwl8k_cmd_enable_sniffer {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
} __attribute__((packed));
-static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_set_aid *cmd;
- u16 prot_mode;
+ struct mwl8k_cmd_enable_sniffer *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(info->aid);
+ cmd->action = cpu_to_le32(!!enable);
- memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (info->use_cts_prot) {
- prot_mode = MWL8K_FRAME_PROT_11G;
+ return rc;
+}
+
+/*
+ * CMD_SET_MAC_ADDR.
+ */
+struct mwl8k_cmd_set_mac_addr {
+ struct mwl8k_cmd_pkt header;
+ union {
+ struct {
+ __le16 mac_type;
+ __u8 mac_addr[ETH_ALEN];
+ } mbss;
+ __u8 mac_addr[ETH_ALEN];
+ };
+} __attribute__((packed));
+
+#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
+#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
+#define MWL8K_MAC_TYPE_PRIMARY_AP 2
+#define MWL8K_MAC_TYPE_SECONDARY_AP 3
+
+static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_cmd_set_mac_addr *cmd;
+ int mac_type;
+ int rc;
+
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ if (priv->ap_fw) {
+ cmd->mbss.mac_type = cpu_to_le16(mac_type);
+ memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
- switch (info->ht_operation_mode &
- IEEE80211_HT_OP_MODE_PROTECTION) {
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
- break;
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
- break;
- default:
- prot_mode = MWL8K_FRAME_PROT_DISABLED;
- break;
- }
+ memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
- cmd->protection_mode = cpu_to_le16(prot_mode);
- memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_SET_RATEADAPT_MODE.
+ */
+struct mwl8k_cmd_set_rate_adapt_mode {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 mode;
+} __attribute__((packed));
+
+static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
+{
+ struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->mode = cpu_to_le16(mode);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2639,115 +2837,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_RATE.
+ * CMD_BSS_START.
*/
-struct mwl8k_cmd_update_rateset {
- struct mwl8k_cmd_pkt header;
- __u8 legacy_rates[14];
-
- /* Bitmap for supported MCS codes. */
- __u8 mcs_set[16];
- __u8 reserved[16];
+struct mwl8k_cmd_bss_start {
+ struct mwl8k_cmd_pkt header;
+ __le32 enable;
} __attribute__((packed));
-static int mwl8k_update_rateset(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int enable)
{
- struct mwl8k_cmd_update_rateset *cmd;
+ struct mwl8k_cmd_bss_start *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ cmd->enable = cpu_to_le32(enable);
- rc = mwl8k_post_cmd(hw, &cmd->header);
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_USE_FIXED_RATE.
+ * CMD_SET_NEW_STN.
*/
-#define MWL8K_RATE_TABLE_SIZE 8
-#define MWL8K_UCAST_RATE 0
-#define MWL8K_USE_AUTO_RATE 0x0002
+struct mwl8k_cmd_set_new_stn {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
+ __u8 mac_addr[6];
+ __le16 stn_id;
+ __le16 action;
+ __le16 rsvd;
+ __le32 legacy_rates;
+ __u8 ht_rates[4];
+ __le16 cap_info;
+ __le16 ht_capabilities_info;
+ __u8 mac_ht_param_info;
+ __u8 rev;
+ __u8 control_channel;
+ __u8 add_channel;
+ __le16 op_mode;
+ __le16 stbc;
+ __u8 add_qos_info;
+ __u8 is_qos_sta;
+ __le32 fw_sta_ptr;
+} __attribute__((packed));
-struct mwl8k_rate_entry {
- /* Set to 1 if HT rate, 0 if legacy. */
- __le32 is_ht_rate;
+#define MWL8K_STA_ACTION_ADD 0
+#define MWL8K_STA_ACTION_REMOVE 2
- /* Set to 1 to use retry_count field. */
- __le32 enable_retry;
+static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ u32 rates;
+ int rc;
- /* Specified legacy rate or MCS. */
- __le32 rate;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->aid = cpu_to_le16(sta->aid);
+ memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
+ cmd->stn_id = cpu_to_le16(sta->aid);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ cmd->legacy_rates = cpu_to_le32(rates);
+ if (sta->ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ cmd->is_qos_sta = 1;
+ }
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
- /* Number of allowed retries. */
- __le32 retry_count;
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_UPDATE_STADB.
+ */
+struct ewc_ht_info {
+ __le16 control1;
+ __le16 control2;
+ __le16 control3;
} __attribute__((packed));
-struct mwl8k_rate_table {
- /* 1 to allow specified rate and below */
- __le32 allow_rate_drop;
- __le32 num_rates;
- struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
+struct peer_capability_info {
+ /* Peer type - AP vs. STA. */
+ __u8 peer_type;
+
+ /* Basic 802.11 capabilities from assoc resp. */
+ __le16 basic_caps;
+
+ /* Set if peer supports 802.11n high throughput (HT). */
+ __u8 ht_support;
+
+ /* Valid if HT is supported. */
+ __le16 ht_caps;
+ __u8 extended_ht_caps;
+ struct ewc_ht_info ewc_info;
+
+ /* Legacy rate table. Intersection of our rates and peer rates. */
+ __u8 legacy_rates[12];
+
+ /* HT rate table. Intersection of our rates and peer rates. */
+ __u8 ht_rates[16];
+ __u8 pad[16];
+
+ /* If set, interoperability mode, no proprietary extensions. */
+ __u8 interop;
+ __u8 pad2;
+ __u8 station_id;
+ __le16 amsdu_enabled;
} __attribute__((packed));
-struct mwl8k_cmd_use_fixed_rate {
- struct mwl8k_cmd_pkt header;
+struct mwl8k_cmd_update_stadb {
+ struct mwl8k_cmd_pkt header;
+
+ /* See STADB_ACTION_TYPE */
__le32 action;
- struct mwl8k_rate_table rate_table;
- /* Unicast, Broadcast or Multicast */
- __le32 rate_type;
- __le32 reserved1;
- __le32 reserved2;
+ /* Peer MAC address */
+ __u8 peer_addr[ETH_ALEN];
+
+ __le32 reserved;
+
+ /* Peer info - valid during add/update. */
+ struct peer_capability_info peer_info;
} __attribute__((packed));
-static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
- u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
+#define MWL8K_STA_DB_MODIFY_ENTRY 1
+#define MWL8K_STA_DB_DEL_ENTRY 2
+
+/* Peer Entry flags - used to define the type of the peer node */
+#define MWL8K_PEER_TYPE_ACCESSPOINT 2
+
+static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mwl8k_cmd_use_fixed_rate *cmd;
- int count;
+ struct mwl8k_cmd_update_stadb *cmd;
+ struct peer_capability_info *p;
+ u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
+ memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
+
+ p = &cmd->peer_info;
+ p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
+ p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
+ p->ht_support = sta->ht_cap.ht_supported;
+ p->ht_caps = sta->ht_cap.cap;
+ p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ legacy_rate_mask_to_array(p->legacy_rates, rates);
+ memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ p->interop = 1;
+ p->amsdu_enabled = 0;
- cmd->action = cpu_to_le32(action);
- cmd->rate_type = cpu_to_le32(rate_type);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (rate_table != NULL) {
- /*
- * Copy over each field manually so that endian
- * conversion can be done.
- */
- cmd->rate_table.allow_rate_drop =
- cpu_to_le32(rate_table->allow_rate_drop);
- cmd->rate_table.num_rates =
- cpu_to_le32(rate_table->num_rates);
-
- for (count = 0; count < rate_table->num_rates; count++) {
- struct mwl8k_rate_entry *dst =
- &cmd->rate_table.rate_entry[count];
- struct mwl8k_rate_entry *src =
- &rate_table->rate_entry[count];
-
- dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
- dst->enable_retry = cpu_to_le32(src->enable_retry);
- dst->rate = cpu_to_le32(src->rate);
- dst->retry_count = cpu_to_le32(src->retry_count);
- }
- }
+ return rc ? rc : p->station_id;
+}
+
+static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_update_stadb *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
+ memcpy(cmd->peer_addr, addr, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2766,19 +3104,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
- iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
-
if (!status)
return IRQ_NONE;
- if (status & MWL8K_A2H_INT_TX_DONE)
- tasklet_schedule(&priv->tx_reclaim_task);
+ if (status & MWL8K_A2H_INT_TX_DONE) {
+ status &= ~MWL8K_A2H_INT_TX_DONE;
+ tasklet_schedule(&priv->poll_tx_task);
+ }
if (status & MWL8K_A2H_INT_RX_READY) {
- while (rxq_process(hw, 0, 1))
- rxq_refill(hw, 0, 1);
+ status &= ~MWL8K_A2H_INT_RX_READY;
+ tasklet_schedule(&priv->poll_rx_task);
}
+ if (status)
+ iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
@@ -2793,6 +3134,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mwl8k_tx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+ int i;
+
+ limit = 32;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
+
+ if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
+ complete(priv->tx_wait);
+ priv->tx_wait = NULL;
+ }
+
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_TX_DONE,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_tx_task);
+ }
+}
+
+static void mwl8k_rx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+
+ limit = 32;
+ limit -= rxq_process(hw, 0, limit);
+ limit -= rxq_refill(hw, 0, limit);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_rx_task);
+ }
+}
+
/*
* Core driver operations.
@@ -2803,7 +3191,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int index = skb_get_queue_mapping(skb);
int rc;
- if (priv->current_channel == NULL) {
+ if (!priv->radio_on) {
printk(KERN_DEBUG "%s: dropped TX frame since radio "
"disabled\n", wiphy_name(hw->wiphy));
dev_kfree_skb(skb);
@@ -2828,19 +3216,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return -EIO;
}
- /* Enable tx reclaim tasklet */
- tasklet_enable(&priv->tx_reclaim_task);
+ /* Enable TX reclaim and RX tasklets. */
+ tasklet_enable(&priv->poll_tx_task);
+ tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
rc = mwl8k_fw_lock(hw);
if (!rc) {
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (!priv->ap_fw) {
if (!rc)
- rc = mwl8k_enable_sniffer(hw, 0);
+ rc = mwl8k_cmd_enable_sniffer(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3240,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
}
if (!rc)
- rc = mwl8k_cmd_setrateadaptmode(hw, 0);
+ rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
if (!rc)
- rc = mwl8k_set_wmm(hw, 0);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 0);
mwl8k_fw_unlock(hw);
}
@@ -2862,7 +3251,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
}
return rc;
@@ -2873,7 +3263,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_802_11_radio_disable(hw);
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -2886,36 +3276,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
- /* Stop tx reclaim tasklet */
- tasklet_disable(&priv->tx_reclaim_task);
+ /* Stop TX reclaim and RX tasklets. */
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
-
- /*
- * We only support one active interface at a time.
- */
- if (priv->vif != NULL)
- return -EBUSY;
-
- /*
- * We only support managed interfaces for now.
- */
- if (conf->type != NL80211_IFTYPE_STATION)
- return -EINVAL;
+ u32 macids_supported;
+ int macid;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
- * mode.
+ * mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3305,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
- /* Clean out driver private area */
- mwl8k_vif = MWL8K_VIF(conf->vif);
- memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
- /* Set and save the mac address */
- mwl8k_set_mac_addr(hw, conf->mac_addr);
- memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ macids_supported = priv->ap_macids_supported;
+ break;
+ case NL80211_IFTYPE_STATION:
+ macids_supported = priv->sta_macids_supported;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Back pointer to parent config block */
- mwl8k_vif->priv = priv;
+ macid = ffs(macids_supported & ~priv->macids_used);
+ if (!macid--)
+ return -EBUSY;
- /* Set Initial sequence number to zero */
+ /* Setup driver private area. */
+ mwl8k_vif = MWL8K_VIF(vif);
+ memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
+ mwl8k_vif->vif = vif;
+ mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
- priv->vif = conf->vif;
- priv->current_channel = NULL;
+ /* Set the mac address. */
+ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
+
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_add_self(hw, vif);
+
+ priv->macids_used |= 1 << mwl8k_vif->macid;
+ list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->vif == NULL)
- return;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->vif = NULL;
+ priv->macids_used &= ~(1 << mwl8k_vif->macid);
+ list_del(&mwl8k_vif->list);
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3362,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
int rc;
if (conf->flags & IEEE80211_CONF_IDLE) {
- mwl8k_cmd_802_11_radio_disable(hw);
- priv->current_channel = NULL;
+ mwl8k_cmd_radio_disable(hw);
return 0;
}
@@ -2973,19 +3370,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
return rc;
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
- priv->current_channel = conf->channel;
-
if (conf->power_level > 18)
conf->power_level = 18;
- rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level);
+ rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
goto out;
@@ -3003,79 +3398,160 @@ out:
return rc;
}
-static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static void
+mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ u32 ap_legacy_rates;
+ u8 ap_mcs_rates[16];
int rc;
- if ((changed & BSS_CHANGED_ASSOC) == 0)
+ if (mwl8k_fw_lock(hw))
return;
- priv->capture_beacon = false;
-
- rc = mwl8k_fw_lock(hw);
- if (rc)
- return;
+ /*
+ * No need to capture a beacon if we're no longer associated.
+ */
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ priv->capture_beacon = false;
- if (info->assoc) {
- memcpy(&mwl8k_vif->bss_info, info,
- sizeof(struct ieee80211_bss_conf));
+ /*
+ * Get the AP's legacy and MCS rates.
+ */
+ if (vif->bss_conf.assoc) {
+ struct ieee80211_sta *ap;
- memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
+ rcu_read_lock();
- /* Install rates */
- rc = mwl8k_update_rateset(hw, vif);
- if (rc)
+ ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (ap == NULL) {
+ rcu_read_unlock();
goto out;
+ }
- /* Turn on rate adaptation */
- rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
- MWL8K_UCAST_RATE, NULL);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ } else {
+ ap_legacy_rates =
+ ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ }
+ memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+
+ rcu_read_unlock();
+ }
+
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
- /* Set radio preamble */
- rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ }
- /* Set slot time */
- rc = mwl8k_cmd_set_slot(hw, info->use_short_slot);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
if (rc)
goto out;
+ }
- /* Update peer rate info */
- rc = mwl8k_cmd_update_sta_db(hw, vif,
- MWL8K_STA_DB_MODIFY_ENTRY);
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
+ }
- /* Set AID */
- rc = mwl8k_cmd_set_aid(hw, vif);
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT))) {
+ rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
+ }
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
- memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
+ memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
- } else {
- rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
- memset(&mwl8k_vif->bss_info, 0,
- sizeof(struct ieee80211_bss_conf));
- memset(mwl8k_vif->bssid, 0, ETH_ALEN);
}
out:
mwl8k_fw_unlock(hw);
}
+static void
+mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ int rc;
+
+ if (mwl8k_fw_lock(hw))
+ return;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
+ if (rc)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ int idx;
+ int rate;
+
+ /*
+ * Use lowest supported basic rate for multicasts
+ * and management frames (such as probe responses --
+ * beacons will always go out at 1 Mb/s).
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb != NULL) {
+ mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
+
+out:
+ mwl8k_fw_unlock(hw);
+}
+
+static void
+mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (!priv->ap_fw)
+ mwl8k_bss_info_changed_sta(hw, vif, info, changed);
+ else
+ mwl8k_bss_info_changed_ap(hw, vif, info, changed);
+}
+
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mclist)
{
@@ -3105,7 +3581,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
- if (priv->vif != NULL) {
+ if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
printk(KERN_INFO "%s: not enabling sniffer "
"mode because STA interface is active\n",
@@ -3114,7 +3590,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
}
if (!priv->sniffer_enabled) {
- if (mwl8k_enable_sniffer(hw, 1))
+ if (mwl8k_cmd_enable_sniffer(hw, 1))
return 0;
priv->sniffer_enabled = true;
}
@@ -3126,6 +3602,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
return 1;
}
+static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
+{
+ if (!list_empty(&priv->vif_list))
+ return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
+
+ return NULL;
+}
+
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -3157,11 +3641,13 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
- if (mwl8k_fw_lock(hw))
+ if (mwl8k_fw_lock(hw)) {
+ kfree(cmd);
return;
+ }
if (priv->sniffer_enabled) {
- mwl8k_enable_sniffer(hw, 0);
+ mwl8k_cmd_enable_sniffer(hw, 0);
priv->sniffer_enabled = false;
}
@@ -3172,7 +3658,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
- u8 *bssid;
+ struct mwl8k_vif *mwl8k_vif;
+ const u8 *bssid;
/*
* Enable the BSS filter.
@@ -3182,9 +3669,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
- bssid = "\x01\x00\x00\x00\x00\x00";
- if (priv->vif != NULL)
- bssid = MWL8K_VIF(priv->vif)->bssid;
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ bssid = mwl8k_vif->vif->bss_conf.bssid;
+ else
+ bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
@@ -3211,7 +3700,39 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value);
+ return mwl8k_cmd_set_rts_threshold(hw, value);
+}
+
+static int mwl8k_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (priv->ap_fw)
+ return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr);
+ else
+ return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr);
+}
+
+static int mwl8k_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ int ret;
+
+ if (!priv->ap_fw) {
+ ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
+ if (ret >= 0) {
+ MWL8K_STA(sta)->peer_id = ret;
+ return 0;
+ }
+
+ return ret;
+ }
+
+ return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3223,14 +3744,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
rc = mwl8k_fw_lock(hw);
if (!rc) {
if (!priv->wmm_enabled)
- rc = mwl8k_set_wmm(hw, 1);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 1);
if (!rc)
- rc = mwl8k_set_edca_params(hw, queue,
- params->cw_min,
- params->cw_max,
- params->aifs,
- params->txop);
+ rc = mwl8k_cmd_set_edca_params(hw, queue,
+ params->cw_min,
+ params->cw_max,
+ params->aifs,
+ params->txop);
mwl8k_fw_unlock(hw);
}
@@ -3238,28 +3759,26 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
return rc;
}
-static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
+static int mwl8k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_tx_queue *txq;
- int index;
-
- spin_lock_bh(&priv->tx_lock);
- for (index = 0; index < MWL8K_TX_QUEUES; index++) {
- txq = priv->txq + index;
- memcpy(&stats[index], &txq->stats,
- sizeof(struct ieee80211_tx_queue_stats));
- }
- spin_unlock_bh(&priv->tx_lock);
-
- return 0;
+ return mwl8k_cmd_get_stat(hw, stats);
}
-static int mwl8k_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int
+mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- return mwl8k_cmd_802_11_get_stat(hw, stats);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
}
static const struct ieee80211_ops mwl8k_ops = {
@@ -3273,67 +3792,73 @@ static const struct ieee80211_ops mwl8k_ops = {
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
+ .sta_add = mwl8k_sta_add,
+ .sta_remove = mwl8k_sta_remove,
.conf_tx = mwl8k_conf_tx,
- .get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
+ .ampdu_action = mwl8k_ampdu_action,
};
-static void mwl8k_tx_reclaim_handler(unsigned long data)
-{
- int i;
- struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
- struct mwl8k_priv *priv = hw->priv;
-
- spin_lock_bh(&priv->tx_lock);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 0);
-
- if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
- complete(priv->tx_wait);
- priv->tx_wait = NULL;
- }
- spin_unlock_bh(&priv->tx_lock);
-}
-
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
- u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
+ mgmt->u.beacon.variable, len);
+ int dtim_period = 1;
- mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
- dev_kfree_skb(skb);
+ if (tim && tim[1] >= 2)
+ dtim_period = tim[3];
+
+ mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
+ dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
enum {
- MWL8687 = 0,
+ MWL8363 = 0,
+ MWL8687,
MWL8366,
};
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
- {
+ [MWL8363] = {
+ .part_name = "88w8363",
+ .helper_image = "mwl8k/helper_8363.fw",
+ .fw_image = "mwl8k/fmimage_8363.fw",
+ },
+ [MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
.fw_image = "mwl8k/fmimage_8687.fw",
- .rxd_ops = &rxd_8687_ops,
- .modes = BIT(NL80211_IFTYPE_STATION),
},
- {
+ [MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
.fw_image = "mwl8k/fmimage_8366.fw",
- .rxd_ops = &rxd_8366_ops,
- .modes = 0,
+ .ap_rxd_ops = &rxd_8366_ap_ops,
},
};
+MODULE_FIRMWARE("mwl8k/helper_8363.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+MODULE_FIRMWARE("mwl8k/helper_8366.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+ { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3352,6 +3877,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
printed_version = 1;
}
+
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3368,6 +3894,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+
hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
if (hw == NULL) {
printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3375,17 +3902,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_reg;
}
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
priv = hw->priv;
priv->hw = hw;
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
- priv->rxd_ops = priv->device_info->rxd_ops;
- priv->sniffer_enabled = false;
- priv->wmm_enabled = false;
- priv->pending_tx_pkts = 0;
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
@@ -3408,16 +3932,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
}
}
- memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
- priv->band.band = IEEE80211_BAND_2GHZ;
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
- memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
+ /* Reset firmware and hardware */
+ mwl8k_hw_reset(priv);
+
+ /* Ask userland hotplug daemon for the device firmware */
+ rc = mwl8k_request_firmware(priv);
+ if (rc) {
+ printk(KERN_ERR "%s: Firmware files not found\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Load firmware into hardware */
+ rc = mwl8k_load_firmware(hw);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot start firmware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Reclaim memory once firmware is successfully loaded */
+ mwl8k_release_firmware(priv);
+
+
+ if (priv->ap_fw) {
+ priv->rxd_ops = priv->device_info->ap_rxd_ops;
+ if (priv->rxd_ops == NULL) {
+ printk(KERN_ERR "%s: Driver does not have AP "
+ "firmware image support for this hardware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+ } else {
+ priv->rxd_ops = &rxd_sta_ops;
+ }
+
+ priv->sniffer_enabled = false;
+ priv->wmm_enabled = false;
+ priv->pending_tx_pkts = 0;
+
/*
* Extra headroom is the size of the required DMA header
@@ -3430,12 +3984,13 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- hw->wiphy->interface_modes = priv->device_info->modes;
-
/* Set rssi and noise values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
- priv->vif = NULL;
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
@@ -3444,19 +3999,20 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
- /* TX reclaim tasklet */
- tasklet_init(&priv->tx_reclaim_task,
- mwl8k_tx_reclaim_handler, (unsigned long)hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
if (priv->cookie == NULL)
- goto err_iounmap;
+ goto err_stop_firmware;
rc = mwl8k_rxq_init(hw, 0);
if (rc)
- goto err_iounmap;
+ goto err_free_cookie;
rxq_refill(hw, 0, INT_MAX);
mutex_init(&priv->fw_mutex);
@@ -3476,7 +4032,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
+ iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3487,31 +4044,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_queues;
}
- /* Reset firmware and hardware */
- mwl8k_hw_reset(priv);
-
- /* Ask userland hotplug daemon for the device firmware */
- rc = mwl8k_request_firmware(priv);
- if (rc) {
- printk(KERN_ERR "%s: Firmware files not found\n",
- wiphy_name(hw->wiphy));
- goto err_free_irq;
- }
-
- /* Load firmware into hardware */
- rc = mwl8k_load_firmware(hw);
- if (rc) {
- printk(KERN_ERR "%s: Cannot start firmware\n",
- wiphy_name(hw->wiphy));
- goto err_stop_firmware;
- }
-
- /* Reclaim memory once firmware is successfully loaded */
- mwl8k_release_firmware(priv);
-
/*
* Temporarily enable interrupts. Initial firmware host
- * commands use interrupts and avoids polling. Disable
+ * commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3527,22 +4062,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot initialise firmware\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+
/* Turn radio off */
- rc = mwl8k_cmd_802_11_radio_disable(hw);
+ rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Clear MAC address */
- rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
printk(KERN_ERR "%s: Cannot clear MAC address\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Disable interrupts */
@@ -3553,7 +4095,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot register device\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_queues;
}
printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3565,10 +4107,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
return 0;
-err_stop_firmware:
- mwl8k_hw_reset(priv);
- mwl8k_release_firmware(priv);
-
err_free_irq:
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -3578,11 +4116,16 @@ err_free_queues:
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
-err_iounmap:
+err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
+err_stop_firmware:
+ mwl8k_hw_reset(priv);
+ mwl8k_release_firmware(priv);
+
+err_iounmap:
if (priv->regs != NULL)
pci_iounmap(pdev, priv->regs);
@@ -3620,15 +4163,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
- /* Remove tx reclaim tasklet */
- tasklet_kill(&priv->tx_reclaim_task);
+ /* Remove TX reclaim and RX tasklets. */
+ tasklet_kill(&priv->poll_tx_task);
+ tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
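
The reshuffled error labels in mwl8k_probe above keep teardown in reverse order of the new setup order (firmware handling now unwinds after the queues and cookie), which is the usual goto-unwind idiom for PCI probe routines. A minimal sketch of that idiom, with invented names (example_probe, example_irq_handler), not code from the driver:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_probe(struct pci_dev *pdev)
	{
		void __iomem *regs;
		int rc;

		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		regs = pci_iomap(pdev, 0, 0);
		if (!regs) {
			rc = -EIO;
			goto err_disable;
		}

		rc = request_irq(pdev->irq, example_irq_handler, IRQF_SHARED,
				 "example", pdev);
		if (rc)
			goto err_iounmap;

		return 0;

		/* Unwind strictly in reverse order of acquisition. */
	err_iounmap:
		pci_iounmap(pdev, regs);
	err_disable:
		pci_disable_device(pdev);
		return rc;
	}

Each failure jumps to the label that undoes everything acquired so far and nothing more, which is exactly what the relabelled mwl8k paths above restore.
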
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index cfa72962052b..5ea0f7cf85b1 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -3,6 +3,7 @@
* See copyright notice in main.c
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/device.h>
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 404830f47ab2..e6369242e49c 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1028,7 +1028,7 @@ int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
}
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct dev_addr_list *mc_list,
+ struct net_device *dev,
int mc_count, int promisc)
{
hermes_t *hw = &priv->hw;
@@ -1049,24 +1049,16 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
* group address if either we want to multicast, or if we were
* multicasting and want to stop */
if (!promisc && (mc_count || priv->mc_count)) {
- struct dev_mc_list *p = mc_list;
+ struct dev_mc_list *p;
struct hermes_multicast mclist;
- int i;
+ int i = 0;
- for (i = 0; i < mc_count; i++) {
- /* paranoia: is list shorter than mc_count? */
- BUG_ON(!p);
- /* paranoia: bad address size in list? */
- BUG_ON(p->dmi_addrlen != ETH_ALEN);
-
- memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
- p = p->next;
+ netdev_for_each_mc_addr(p, dev) {
+ if (i == mc_count)
+ break;
+ memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
}
- if (p)
- printk(KERN_WARNING "%s: Multicast list is "
- "longer than mc_count\n", priv->ndev->name);
-
err = hermes_write_ltv(hw, USER_BAP,
HERMES_RID_CNFGROUPADDRESSES,
HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
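
The hw.c hunk above replaces the hand-rolled walk of the multicast list with the netdev_for_each_mc_addr() iterator, dropping the manual length/paranoia checks. A minimal sketch of the pattern as it looks in this era of the API (struct dev_mc_list with a dmi_addr field; later kernels hand out struct netdev_hw_addr instead); the helper name is illustrative:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/string.h>

	/* Copy at most max_count multicast addresses into a flat buffer,
	 * returning how many were copied. */
	static int example_copy_mc_addrs(struct net_device *dev,
					 u8 *buf, int max_count)
	{
		struct dev_mc_list *mc;
		int i = 0;

		netdev_for_each_mc_addr(mc, dev) {
			if (i == max_count)
				break;
			memcpy(buf + i * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
			i++;
		}
		return i;
	}
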
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index e2f7fdc4d45a..9799a1d14a63 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -43,7 +43,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct dev_addr_list *mc_list,
+ struct net_device *dev,
int mc_count, int promisc);
int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
char buf[IW_ESSID_MAX_SIZE+1]);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 753a1804eee7..413e9ab6cab3 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -78,6 +78,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -1668,16 +1669,15 @@ __orinoco_set_multicast_list(struct net_device *dev)
/* The Hermes doesn't seem to have an allmulti mode, so we go
* into promiscuous mode and let the upper levels deal. */
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv))) {
+ (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
promisc = 1;
mc_count = 0;
} else {
promisc = 0;
- mc_count = dev->mc_count;
+ mc_count = netdev_mc_count(dev);
}
- err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
- promisc);
+ err = __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc);
return err;
}
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index f27bb8367c98..1d4ada188eda 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -407,7 +407,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
@@ -417,7 +416,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
- PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
@@ -432,7 +430,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
@@ -445,7 +442,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
- PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
@@ -454,8 +450,11 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
- PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
+ PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
+ PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
+ PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c383410..075f446b3139 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_nortel_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e7..bda5317cc596 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f5..e0d5874ab42f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_plx_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc71..88cbc7902aa0 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_tmd_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
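
The four orinoco bus-glue drivers above switch their ID tables from a plain struct pci_device_id array to DEFINE_PCI_DEVICE_TABLE(), which supplies the const qualifier and section annotation for the table. A minimal sketch with an invented table name and a single illustrative ID:

	#include <linux/pci.h>
	#include <linux/module.h>

	static DEFINE_PCI_DEVICE_TABLE(example_pci_ids) = {
		{ PCI_DEVICE(0x1260, 0x3872) },	/* vendor, device */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_ids);
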
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index d2f10e9c2162..330d42d45333 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -3,6 +3,7 @@
* See copyright notice in main.c
*/
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 7698fdd6a3a2..fbcc6e1a2e1d 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -2,6 +2,7 @@
*
* See copyright notice in main.c
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
@@ -23,7 +24,7 @@
#define MAX_RID_LEN 1024
/* Helper routine to record keys
- * Do not call from interrupt context */
+ * It is called under orinoco_lock so it may not sleep */
static int orinoco_set_key(struct orinoco_private *priv, int index,
enum orinoco_alg alg, const u8 *key, int key_len,
const u8 *seq, int seq_len)
@@ -32,14 +33,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
kzfree(priv->keys[index].seq);
if (key_len) {
- priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
+ priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
if (!priv->keys[index].key)
goto nomem;
} else
priv->keys[index].key = NULL;
if (seq_len) {
- priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
+ priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
if (!priv->keys[index].seq)
goto free_key;
} else
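
The wext.c hunk above moves the key allocations from GFP_KERNEL to GFP_ATOMIC because, per the updated comment, orinoco_set_key() runs under orinoco_lock and therefore must not sleep. A minimal sketch of that constraint; struct example_keystore and the function name are invented for illustration:

	#include <linux/slab.h>

	struct example_keystore {
		u8 *key;
		size_t key_len;
	};

	/* Called with the owner's spinlock already held (as orinoco_set_key()
	 * is called under orinoco_lock), so a sleeping allocation such as
	 * GFP_KERNEL is not allowed; GFP_ATOMIC may fail but never sleeps. */
	static int example_store_key_locked(struct example_keystore *ks,
					    const u8 *key, size_t len)
	{
		u8 *copy = kmemdup(key, len, GFP_ATOMIC);

		if (!copy)
			return -ENOMEM;
		kfree(ks->key);
		ks->key = copy;
		ks->key_len = len;
		return 0;
	}
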
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 8e3818f6832e..187e263b045a 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -20,6 +20,7 @@
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/sort.h>
+#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index e7b9e9cb39f5..c43a5d461ab2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -17,6 +17,7 @@
*/
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45d..a7cb9eb759a1 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -17,6 +17,7 @@
*/
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
@@ -33,21 +34,29 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54common");
+static int p54_sta_add_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct p54_common *priv = hw->priv;
+
+ /*
+ * Notify the firmware that we no longer want or need
+ * to buffer frames for this station.

+ */
+
+ p54_sta_unlock(priv, sta->addr);
+
+ return 0;
+}
+
static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
enum sta_notify_cmd notify_cmd,
struct ieee80211_sta *sta)
{
struct p54_common *priv = dev->priv;
- switch (notify_cmd) {
- case STA_NOTIFY_ADD:
- case STA_NOTIFY_REMOVE:
- /*
- * Notify the firmware that we don't want or we don't
- * need to buffer frames for this station anymore.
- */
- p54_sta_unlock(priv, sta->addr);
- break;
+ switch (notify_cmd) {
case STA_NOTIFY_AWAKE:
/* update the firmware's filter table */
p54_sta_unlock(priv, sta->addr);
@@ -216,7 +225,7 @@ static void p54_stop(struct ieee80211_hw *dev)
}
static int p54_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -226,28 +235,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
mutex_unlock(&priv->conf_mutex);
return -EOPNOTSUPP;
}
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
return 0;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -358,16 +367,6 @@ static int p54_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int p54_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct p54_common *priv = dev->priv;
-
- memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
- sizeof(stats[0]) * dev->queues);
- return 0;
-}
-
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -516,13 +515,14 @@ static const struct ieee80211_ops p54_ops = {
.remove_interface = p54_remove_interface,
.set_tim = p54_set_tim,
.sta_notify = p54_sta_notify,
+ .sta_add = p54_sta_add_remove,
+ .sta_remove = p54_sta_add_remove,
.set_key = p54_set_key,
.config = p54_config,
.bss_info_changed = p54_bss_info_changed,
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
- .get_tx_stats = p54_get_tx_stats
};
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
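
The p54 main.c changes above track a mac80211 API change: the STA_NOTIFY_ADD/STA_NOTIFY_REMOVE cases of .sta_notify become dedicated .sta_add/.sta_remove callbacks, and .add_interface/.remove_interface now take a struct ieee80211_vif directly. A minimal sketch of wiring such callbacks, with driver names invented for illustration (a real ops table would also need .tx, .start, etc.):

	#include <net/mac80211.h>

	static int example_sta_add(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta)
	{
		/* Per-station setup (or teardown when used as .sta_remove). */
		return 0;
	}

	static const struct ieee80211_ops example_ops = {
		.sta_add	= example_sta_add,
		.sta_remove	= example_sta_add, /* p54 points both at one helper */
	};
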
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1afc39410e85..43a3b2ead81a 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -157,6 +157,12 @@ struct p54_led_dev {
#endif /* CONFIG_P54_LEDS */
+struct p54_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
+};
+
struct p54_common {
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
@@ -183,7 +189,7 @@ struct p54_common {
/* (e)DCF / QOS state */
bool use_short_slot;
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[8];
+ struct p54_tx_queue_stats tx_stats[8];
struct p54_edcf_queue_param qos_params[8];
/* Radio data */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..269fda362836 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
@@ -31,7 +32,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
-static struct pci_device_id p54p_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
@@ -157,6 +158,14 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ dev_err(&priv->pdev->dev,
+ "RX DMA Mapping error\n");
+ break;
+ }
+
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
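
The refill hunk above starts checking the result of pci_map_single() with pci_dma_mapping_error() before handing the buffer to the device; the TX path further down gets the same treatment. A minimal sketch of the pattern (the helper name is illustrative):

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				      size_t len, dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_FROMDEVICE);

		/* The mapping can fail (e.g. IOMMU exhaustion); handing the
		 * device a bad handle would corrupt memory, so drop the
		 * buffer instead. */
		if (pci_dma_mapping_error(pdev, *mapping)) {
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		return 0;
	}
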
@@ -197,6 +206,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
continue;
}
+
+ if (unlikely(len > priv->common.rx_mtu)) {
+ if (net_ratelimit())
+ dev_err(&priv->pdev->dev, "rx'd frame size "
+ "exceeds length threshold.\n");
+
+ len = priv->common.rx_mtu;
+ }
skb_put(skb, len);
if (p54_rx(dev, skb)) {
@@ -218,14 +235,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
}
-/* caller must hold priv->lock */
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- void **tx_buf)
+ struct sk_buff **tx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
+ struct sk_buff *skb;
u32 idx, i;
i = (*index) % ring_limit;
@@ -234,9 +251,8 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
desc = &ring[i];
- if (tx_buf[i])
- if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i]))
- p54_free_skb(dev, tx_buf[i]);
+
+ skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -247,17 +263,28 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
desc->len = 0;
desc->flags = 0;
+ if (skb && FREE_AFTER_TX(skb))
+ p54_free_skb(dev, skb);
+
i++;
i %= ring_limit;
}
}
-static void p54p_rx_tasklet(unsigned long dev_id)
+static void p54p_tasklet(unsigned long dev_id)
{
struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
+ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
+ ARRAY_SIZE(ring_control->tx_mgmt),
+ priv->tx_buf_mgmt);
+
+ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
+ ARRAY_SIZE(ring_control->tx_data),
+ priv->tx_buf_data);
+
p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
@@ -272,59 +299,49 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct p54p_priv *priv = dev->priv;
- struct p54p_ring_control *ring_control = priv->ring_control;
__le32 reg;
- spin_lock(&priv->lock);
reg = P54P_READ(int_ident);
if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+ goto out;
}
-
P54P_WRITE(int_ack, reg);
reg &= P54P_READ(int_enable);
- if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
- 3, ring_control->tx_mgmt,
- ARRAY_SIZE(ring_control->tx_mgmt),
- priv->tx_buf_mgmt);
-
- p54p_check_tx_ring(dev, &priv->tx_idx_data,
- 1, ring_control->tx_data,
- ARRAY_SIZE(ring_control->tx_data),
- priv->tx_buf_data);
-
- tasklet_schedule(&priv->rx_tasklet);
-
- } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
+ if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
+ tasklet_schedule(&priv->tasklet);
+ else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
complete(&priv->boot_comp);
- spin_unlock(&priv->lock);
-
+out:
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
+ unsigned long flags;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
- unsigned long flags;
struct p54p_desc *desc;
dma_addr_t mapping;
u32 device_idx, idx, i;
spin_lock_irqsave(&priv->lock, flags);
-
device_idx = le32_to_cpu(ring_control->device_idx[1]);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
- priv->tx_buf_data[i] = skb;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ p54_free_skb(dev, skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+ }
+ priv->tx_buf_data[i] = skb;
+
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
@@ -346,14 +363,14 @@ static void p54p_stop(struct ieee80211_hw *dev)
unsigned int i;
struct p54p_desc *desc;
- tasklet_kill(&priv->rx_tasklet);
-
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
free_irq(priv->pdev->irq, dev);
+ tasklet_kill(&priv->tasklet);
+
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
@@ -537,7 +554,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
err = request_firmware(&priv->firmware, "isl3886pci",
&priv->pdev->dev);
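
The p54pci hunks above also fold TX reclaim into the tasklet that previously handled only RX, so the hard interrupt handler merely acknowledges the interrupt and schedules deferred work instead of walking rings under priv->lock. A minimal sketch of that split; names and register offsets (example_*, EXAMPLE_INT_*) are invented:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct example_priv {
		void __iomem *regs;
		struct tasklet_struct tasklet;
	};

	static void example_tasklet(unsigned long data)
	{
		struct example_priv *priv = (struct example_priv *)data;

		example_reclaim_tx(priv);	/* previously run in the IRQ handler */
		example_refill_rx(priv);
	}

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		struct example_priv *priv = dev_id;
		u32 status = readl(priv->regs + EXAMPLE_INT_STATUS);

		if (!status)
			return IRQ_NONE;

		writel(status, priv->regs + EXAMPLE_INT_ACK);
		tasklet_schedule(&priv->tasklet);
		return IRQ_HANDLED;
	}

	/* During probe:
	 * tasklet_init(&priv->tasklet, example_tasklet, (unsigned long)priv);
	 */
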
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index fbb683953fb2..2feead617a3b 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -92,7 +92,7 @@ struct p54p_priv {
struct p54_common common;
struct pci_dev *pdev;
struct p54p_csr __iomem *map;
- struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tasklet;
const struct firmware *firmware;
spinlock_t lock;
struct p54p_ring_control *ring_control;
@@ -101,8 +101,8 @@ struct p54p_priv {
u32 rx_idx_mgmt, tx_idx_mgmt;
struct sk_buff *rx_buf_data[8];
struct sk_buff *rx_buf_mgmt[4];
- void *tx_buf_data[32];
- void *tx_buf_mgmt[4];
+ struct sk_buff *tx_buf_data[32];
+ struct sk_buff *tx_buf_mgmt[4];
struct completion boot_comp;
};
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index afd26bf06649..c8f09da1f84d 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -29,6 +29,7 @@
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
#include "p54spi.h"
#include "p54spi_eeprom.h"
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 92af9b96bb7a..743a6c68b29d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
@@ -35,7 +36,9 @@ MODULE_FIRMWARE("isl3887usb");
static struct usb_device_id p54u_table[] __devinitdata = {
/* Version 1 devices (pci chip + net2280) */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
+ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
{USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
{USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
{USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
@@ -60,6 +63,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
{USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb5..66057999a93c 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -183,10 +183,10 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
struct sk_buff *skb,
const u16 p54_queue)
{
- struct ieee80211_tx_queue_stats *queue;
+ struct p54_tx_queue_stats *queue;
unsigned long flags;
- if (WARN_ON(p54_queue > P54_QUEUE_NUM))
+ if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
return -EINVAL;
queue = &priv->tx_stats[p54_queue];
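
The one-line txrx.c change above fixes an off-by-one: tx_stats[] has P54_QUEUE_NUM entries, so the valid indices are 0 .. P54_QUEUE_NUM - 1 and the guard must also reject an index equal to the size:

	if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
		return -EINVAL;

An equivalent form (assuming the array is visible at the check site) would be WARN_ON(p54_queue >= ARRAY_SIZE(priv->tx_stats)), which keeps the bound tied to the array itself.
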
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index f7f5c793514b..a45818ebfdfb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a3ba3539db02..689d59a13d5b 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 872b64783e78..ac99eaaeabce 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f2..dc14420a9adc 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
-static const struct pci_device_id prism54_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
0x1260, 0x3890,
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 69d2f882fd06..adb289723a96 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 87a1734663da..0b27e50fe0d5 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -22,6 +22,7 @@
#include <linux/wireless.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
/*
* Function definitions
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 1187e6112a64..d66933d70fb9 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "prismcompat.h"
#include "islpci_dev.h"
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e1e4e32b22..11865ea21875 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -35,7 +35,6 @@
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
@@ -1871,10 +1870,8 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
/*===========================================================================*/
static void ray_update_multi_list(struct net_device *dev, int all)
{
- struct dev_mc_list *dmi, **dmip;
int ccsindex;
struct ccs __iomem *pccs;
- int i = 0;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
void __iomem *p = local->sram + HOST_TO_ECF_BASE;
@@ -1895,9 +1892,11 @@ static void ray_update_multi_list(struct net_device *dev, int all)
writeb(0xff, &pccs->var);
local->num_multi = 0xff;
} else {
+ struct dev_mc_list *dmi;
+ int i = 0;
+
/* Copy the kernel's list of MC addresses to card */
- for (dmip = &dev->mc_list; (dmi = *dmip) != NULL;
- dmip = &dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
dev_dbg(&link->dev,
"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
@@ -1950,7 +1949,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
ray_update_multi_list(dev, 1);
else {
- if (local->num_multi != dev->mc_count)
+ if (local->num_multi != netdev_mc_count(dev))
ray_update_multi_list(dev, 0);
}
} /* end set_multicast_list */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e15..1de5b22d3efe 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -41,6 +41,7 @@
#include <linux/if_arp.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
+#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <linux/usb/usbnet.h>
@@ -728,9 +729,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
ret = rndis_command(dev, u.header, buflen);
priv->current_command_oid = 0;
if (ret < 0)
- devdbg(dev, "rndis_query_oid(%s): rndis_command() failed, %d "
- "(%08x)", oid_to_string(oid), ret,
- le32_to_cpu(u.get_c->status));
+ netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
+ __func__, oid_to_string(oid), ret,
+ le32_to_cpu(u.get_c->status));
if (ret == 0) {
memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
@@ -741,9 +742,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
ret = rndis_error_status(u.get_c->status);
if (ret < 0)
- devdbg(dev, "rndis_query_oid(%s): device returned "
- "error, 0x%08x (%d)", oid_to_string(oid),
- le32_to_cpu(u.get_c->status), ret);
+ netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
+ __func__, oid_to_string(oid),
+ le32_to_cpu(u.get_c->status), ret);
}
mutex_unlock(&priv->command_lock);
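
The rndis_wlan hunks in this file convert the usbnet-private devdbg/devwarn/devinfo/deverr helpers to the generic netdev_dbg/netdev_warn/netdev_info/netdev_err macros on the underlying net_device, adding the trailing newline those macros expect and using __func__ instead of spelling out the function name. A minimal before/after sketch (the message text is illustrative):

	/* old style: usbnet-private helper, no trailing newline */
	devdbg(usbdev, "set_channel(%d)", channel);

	/* new style: generic netdev printk helper on usbdev->net */
	netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);
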
@@ -791,17 +792,17 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
ret = rndis_command(dev, u.header, buflen);
priv->current_command_oid = 0;
if (ret < 0)
- devdbg(dev, "rndis_set_oid(%s): rndis_command() failed, %d "
- "(%08x)", oid_to_string(oid), ret,
- le32_to_cpu(u.set_c->status));
+ netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
+ __func__, oid_to_string(oid), ret,
+ le32_to_cpu(u.set_c->status));
if (ret == 0) {
ret = rndis_error_status(u.set_c->status);
if (ret < 0)
- devdbg(dev, "rndis_set_oid(%s): device returned error, "
- "0x%08x (%d)", oid_to_string(oid),
- le32_to_cpu(u.set_c->status), ret);
+ netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
+ __func__, oid_to_string(oid),
+ le32_to_cpu(u.set_c->status), ret);
}
mutex_unlock(&priv->command_lock);
@@ -870,11 +871,11 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
#endif
if (value_type == 2)
- devdbg(dev, "setting config parameter: %s, value: %s",
- param, (u8 *)value);
+ netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n",
+ param, (u8 *)value);
else
- devdbg(dev, "setting config parameter: %s, value: %d",
- param, *(u32 *)value);
+ netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n",
+ param, *(u32 *)value);
infobuf->name_offs = cpu_to_le32(sizeof(*infobuf));
infobuf->name_length = cpu_to_le32(param_len);
@@ -897,20 +898,21 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
}
#ifdef DEBUG
- devdbg(dev, "info buffer (len: %d):", info_len);
+ netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len);
for (i = 0; i < info_len; i += 12) {
u32 *tmp = (u32 *)((u8 *)infobuf + i);
- devdbg(dev, "%08X:%08X:%08X",
- cpu_to_be32(tmp[0]),
- cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]));
+ netdev_dbg(dev->net, "%08X:%08X:%08X\n",
+ cpu_to_be32(tmp[0]),
+ cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]));
}
#endif
ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
infobuf, info_len);
if (ret != 0)
- devdbg(dev, "setting rndis config parameter failed, %d.", ret);
+ netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
+ ret);
kfree(infobuf);
return ret;
@@ -945,13 +947,13 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
if (ret < 0) {
- devwarn(usbdev, "setting SSID failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
return ret;
}
if (ret == 0) {
memcpy(&priv->essid, ssid, sizeof(priv->essid));
priv->radio_on = true;
- devdbg(usbdev, "set_essid: radio_on = true");
+ netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
}
return ret;
@@ -963,7 +965,8 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN);
if (ret < 0) {
- devwarn(usbdev, "setting BSSID[%pM] failed (%08X)", bssid, ret);
+ netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
+ bssid, ret);
return ret;
}
@@ -1021,7 +1024,8 @@ static int disassociate(struct usbnet *usbdev, bool reset_ssid)
ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0);
if (ret == 0) {
priv->radio_on = false;
- devdbg(usbdev, "disassociate: radio_on = false");
+ netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
+ __func__);
if (reset_ssid)
msleep(100);
@@ -1054,8 +1058,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
__le32 tmp;
int auth_mode, ret;
- devdbg(usbdev, "set_auth_mode: wpa_version=0x%x authalg=0x%x "
- "keymgmt=0x%x", wpa_version, auth_type, keymgmt);
+ netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n",
+ __func__, wpa_version, auth_type, keymgmt);
if (wpa_version & NL80211_WPA_VERSION_2) {
if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X)
@@ -1082,7 +1086,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting auth mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1098,7 +1103,8 @@ static int set_priv_filter(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
- devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version);
+ netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n",
+ __func__, priv->wpa_version);
if (priv->wpa_version & NL80211_WPA_VERSION_2 ||
priv->wpa_version & NL80211_WPA_VERSION_1)
@@ -1116,8 +1122,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
__le32 tmp;
int encr_mode, ret;
- devdbg(usbdev, "set_encr_mode: cipher_pair=0x%x cipher_group=0x%x",
- pairwise, groupwise);
+ netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n",
+ __func__, pairwise, groupwise);
if (pairwise & RNDIS_WLAN_ALG_CCMP)
encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
@@ -1136,7 +1142,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting encr mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1151,13 +1158,15 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
__le32 tmp;
int ret;
- devdbg(usbdev, "set_infra_mode: infra_mode=0x%x", priv->infra_mode);
+ netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n",
+ __func__, priv->infra_mode);
tmp = cpu_to_le32(mode);
ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting infra mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1174,7 +1183,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
{
__le32 tmp;
- devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
if (rts_threshold < 0 || rts_threshold > 2347)
rts_threshold = 2347;
@@ -1188,7 +1197,7 @@ static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
{
__le32 tmp;
- devdbg(usbdev, "set_frag_threshold %i", frag_threshold);
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold);
if (frag_threshold < 256 || frag_threshold > 2346)
frag_threshold = 2346;
@@ -1222,7 +1231,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
unsigned int dsconfig;
int len, ret;
- devdbg(usbdev, "set_channel(%d)", channel);
+ netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);
/* this OID is valid only when not associated */
if (is_associated(usbdev))
@@ -1233,7 +1242,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
len = sizeof(config);
ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
if (ret < 0) {
- devdbg(usbdev, "set_channel: querying configuration failed");
+ netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
+ __func__);
return ret;
}
@@ -1241,7 +1251,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config,
sizeof(config));
- devdbg(usbdev, "set_channel: %d -> %d", channel, ret);
+ netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
return ret;
}
@@ -1255,7 +1265,8 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
u32 cipher;
int ret;
- devdbg(usbdev, "add_wep_key(idx: %d, len: %d)", index, key_len);
+ netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
+ __func__, index, key_len);
if ((key_len != 5 && key_len != 13) || index < 0 || index > 3)
return -EINVAL;
@@ -1277,15 +1288,15 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP,
RNDIS_WLAN_ALG_NONE);
if (ret)
- devwarn(usbdev, "encryption couldn't be enabled (%08X)",
- ret);
+ netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n",
+ ret);
}
ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key,
sizeof(ndis_key));
if (ret != 0) {
- devwarn(usbdev, "adding encryption key %d failed (%08X)",
- index+1, ret);
+ netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
+ index + 1, ret);
return ret;
}
@@ -1307,22 +1318,23 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
int ret;
if (index < 0 || index >= 4) {
- devdbg(usbdev, "add_wpa_key: index out of range (%i)", index);
+ netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
+ __func__, index);
return -EINVAL;
}
if (key_len > sizeof(ndis_key.material) || key_len < 0) {
- devdbg(usbdev, "add_wpa_key: key length out of range (%i)",
- key_len);
+ netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n",
+ __func__, key_len);
return -EINVAL;
}
if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) {
if (!rx_seq || seq_len <= 0) {
- devdbg(usbdev, "add_wpa_key: recv seq flag without"
- "buffer");
+ netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n",
+ __func__);
return -EINVAL;
}
if (rx_seq && seq_len > sizeof(ndis_key.rsc)) {
- devdbg(usbdev, "add_wpa_key: too big recv seq buffer");
+ netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__);
return -EINVAL;
}
}
@@ -1330,15 +1342,16 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
is_addr_ok = addr && !is_zero_ether_addr(addr) &&
!is_broadcast_ether_addr(addr);
if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) {
- devdbg(usbdev, "add_wpa_key: pairwise but bssid invalid (%pM)",
- addr);
+ netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n",
+ __func__, addr);
return -EINVAL;
}
- devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index,
- !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
- !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
- !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
+ netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n",
+ __func__, index,
+ !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
+ !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
+ !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
memset(&ndis_key, 0, sizeof(ndis_key));
@@ -1372,7 +1385,8 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
le32_to_cpu(ndis_key.size));
- devdbg(usbdev, "add_wpa_key: OID_802_11_ADD_KEY -> %08X", ret);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n",
+ __func__, ret);
if (ret != 0)
return ret;
@@ -1401,7 +1415,7 @@ static int restore_key(struct usbnet *usbdev, int key_idx)
key = priv->encr_keys[key_idx];
- devdbg(usbdev, "restore_key: %i:%i", key_idx, key.len);
+ netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len);
if (key.len == 0)
return 0;
@@ -1436,8 +1450,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
is_wpa = is_wpa_key(priv, index);
- devdbg(usbdev, "remove_key: %i:%s:%i", index, is_wpa ? "wpa" : "wep",
- priv->encr_keys[index].len);
+ netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n",
+ __func__, index, is_wpa ? "wpa" : "wep",
+ priv->encr_keys[index].len);
clear_key(priv, index);
@@ -1464,9 +1479,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex,
sizeof(keyindex));
if (ret != 0) {
- devwarn(usbdev,
- "removing encryption key %d failed (%08X)",
- index, ret);
+ netdev_warn(usbdev->net,
+ "removing encryption key %d failed (%08X)\n",
+ index, ret);
return ret;
}
}
@@ -1482,59 +1497,76 @@ static void set_multicast_list(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct dev_mc_list *mclist;
- __le32 filter;
- int ret, i, size;
- char *buf;
+ __le32 filter, basefilter;
+ int ret;
+ char *mc_addrs = NULL;
+ int mc_count;
- filter = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
+ basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED |
+ RNDIS_PACKET_TYPE_BROADCAST;
if (usbdev->net->flags & IFF_PROMISC) {
filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
RNDIS_PACKET_TYPE_ALL_LOCAL;
- } else if (usbdev->net->flags & IFF_ALLMULTI ||
- usbdev->net->mc_count > priv->multicast_size) {
+ } else if (usbdev->net->flags & IFF_ALLMULTI) {
+ filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
+ }
+
+ if (filter != basefilter)
+ goto set_filter;
+
+ /*
+ * mc_list should be accessed holding the lock, so copy addresses to
+ * local buffer first.
+ */
+ netif_addr_lock_bh(usbdev->net);
+ mc_count = netdev_mc_count(usbdev->net);
+ if (mc_count > priv->multicast_size) {
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- } else if (usbdev->net->mc_count > 0) {
- size = min(priv->multicast_size, usbdev->net->mc_count);
- buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
- if (!buf) {
- devwarn(usbdev,
- "couldn't alloc %d bytes of memory",
- size * ETH_ALEN);
+ } else if (mc_count) {
+ int i = 0;
+
+ mc_addrs = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ netdev_warn(usbdev->net,
+ "couldn't alloc %d bytes of memory\n",
+ mc_count * ETH_ALEN);
+ netif_addr_unlock_bh(usbdev->net);
return;
}
- mclist = usbdev->net->mc_list;
- for (i = 0; i < size && mclist; mclist = mclist->next) {
- if (mclist->dmi_addrlen != ETH_ALEN)
- continue;
+ netdev_for_each_mc_addr(mclist, usbdev->net)
+ memcpy(mc_addrs + i++ * ETH_ALEN,
+ mclist->dmi_addr, ETH_ALEN);
+ }
+ netif_addr_unlock_bh(usbdev->net);
- memcpy(buf + i * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
- i++;
- }
+ if (filter != basefilter)
+ goto set_filter;
- ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, buf,
- i * ETH_ALEN);
- if (ret == 0 && i > 0)
+ if (mc_count) {
+ ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs,
+ mc_count * ETH_ALEN);
+ kfree(mc_addrs);
+ if (ret == 0)
filter |= RNDIS_PACKET_TYPE_MULTICAST;
else
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- devdbg(usbdev, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d",
- i, priv->multicast_size, ret);
-
- kfree(buf);
+ netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
+ mc_count, priv->multicast_size, ret);
}
+set_filter:
ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter,
sizeof(filter));
if (ret < 0) {
- devwarn(usbdev, "couldn't set packet filter: %08x",
- le32_to_cpu(filter));
+ netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
+ le32_to_cpu(filter));
}
- devdbg(usbdev, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d",
- le32_to_cpu(filter), ret);
+ netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
+ le32_to_cpu(filter), ret);
}
/*
@@ -1592,7 +1624,8 @@ static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "rndis_set_tx_power type:0x%x dbm:%i", type, dbm);
+ netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n",
+ __func__, type, dbm);
/* Device doesn't support changing txpower after initialization, only
* turn off/on radio. Support 'auto' mode and setting same dBm that is
@@ -1615,7 +1648,7 @@ static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
*dbm = get_bcm4320_power_dbm(priv);
- devdbg(usbdev, "rndis_get_tx_power dbm:%i", *dbm);
+ netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm);
return 0;
}
@@ -1629,7 +1662,7 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
int ret;
__le32 tmp;
- devdbg(usbdev, "cfg80211.scan");
+ netdev_dbg(usbdev->net, "cfg80211.scan\n");
/* Get current bssid list from device before new scan, as new scan
* clears internal bssid list.
@@ -1669,8 +1702,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
int ie_len, bssid_len;
u8 *ie;
- devdbg(usbdev, " found bssid: '%.32s' [%pM]", bssid->ssid.essid,
- bssid->mac);
+ netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n",
+ bssid->ssid.essid, bssid->mac);
/* parse bssid structure */
bssid_len = le32_to_cpu(bssid->length);
@@ -1712,7 +1745,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev)
int ret = -EINVAL, len, count, bssid_len;
bool resized = false;
- devdbg(usbdev, "check_bssid_list");
+ netdev_dbg(usbdev->net, "check_bssid_list\n");
len = CONTROL_BUFFER_SIZE;
resize_buf:
@@ -1736,8 +1769,8 @@ resize_buf:
bssid = bssid_list->bssid;
bssid_len = le32_to_cpu(bssid->length);
count = le32_to_cpu(bssid_list->num_items);
- devdbg(usbdev, "check_bssid_list: %d BSSIDs found (buflen: %d)", count,
- len);
+ netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n",
+ count, len);
while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
rndis_bss_info_update(usbdev, bssid);
@@ -1759,7 +1792,7 @@ static void rndis_get_scan_results(struct work_struct *work)
struct usbnet *usbdev = priv->usbdev;
int ret;
- devdbg(usbdev, "get_scan_results");
+ netdev_dbg(usbdev->net, "get_scan_results\n");
if (!priv->scan_request)
return;
@@ -1793,7 +1826,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_ciphers_pairwise > 0 &&
pairwise == RNDIS_WLAN_ALG_NONE) {
- deverr(usbdev, "Unsupported pairwise cipher");
+ netdev_err(usbdev->net, "Unsupported pairwise cipher\n");
return -ENOTSUPP;
}
@@ -1803,28 +1836,30 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_akm_suites > 0 &&
keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) {
- deverr(usbdev, "Invalid keymgmt");
+ netdev_err(usbdev->net, "Invalid keymgmt\n");
return -ENOTSUPP;
}
- devdbg(usbdev, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:"
- "0x%x]:0x%x)", sme->ssid, sme->bssid, chan,
- sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
- groupwise, pairwise, keymgmt);
+ netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n",
+ sme->ssid, sme->bssid, chan,
+ sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
+ groupwise, pairwise, keymgmt);
if (is_associated(usbdev))
disassociate(usbdev, false);
ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
if (ret < 0) {
- devdbg(usbdev, "connect: set_infra_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type,
keymgmt);
if (ret < 0) {
- devdbg(usbdev, "connect: set_auth_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
@@ -1832,14 +1867,16 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
ret = set_encr_mode(usbdev, pairwise, groupwise);
if (ret < 0) {
- devdbg(usbdev, "connect: set_encr_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
if (channel) {
ret = set_channel(usbdev, chan);
if (ret < 0) {
- devdbg(usbdev, "connect: set_channel failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
}
@@ -1848,8 +1885,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
priv->encr_tx_key_index = sme->key_idx;
ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx);
if (ret < 0) {
- devdbg(usbdev, "connect: add_wep_key failed, %d "
- "(%d, %d)", ret, sme->key_len, sme->key_idx);
+ netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n",
+ ret, sme->key_len, sme->key_idx);
goto err_turn_radio_on;
}
}
@@ -1858,7 +1895,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
!is_broadcast_ether_addr(sme->bssid)) {
ret = set_bssid(usbdev, sme->bssid);
if (ret < 0) {
- devdbg(usbdev, "connect: set_bssid failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
} else
@@ -1880,7 +1918,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
ret = set_essid(usbdev, &ssid);
if (ret < 0)
- devdbg(usbdev, "connect: set_essid failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret);
return ret;
err_turn_radio_on:
@@ -1895,7 +1933,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "cfg80211.disconnect(%d)", reason_code);
+ netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
priv->connected = false;
memset(priv->bssid, 0, ETH_ALEN);
@@ -1929,21 +1967,23 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
alg = RNDIS_WLAN_ALG_NONE;
}
- devdbg(usbdev, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)", params->ssid,
- params->bssid, chan, params->privacy);
+ netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n",
+ params->ssid, params->bssid, chan, params->privacy);
if (is_associated(usbdev))
disassociate(usbdev, false);
ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_infra_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_auth_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
@@ -1951,15 +1991,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_encr_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
if (channel) {
ret = set_channel(usbdev, chan);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_channel failed, %d",
- ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
}
@@ -1968,7 +2009,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
!is_broadcast_ether_addr(params->bssid)) {
ret = set_bssid(usbdev, params->bssid);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_bssid failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
} else
@@ -1988,7 +2030,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = set_essid(usbdev, &ssid);
if (ret < 0)
- devdbg(usbdev, "join_ibss: set_essid failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n",
+ ret);
return ret;
err_turn_radio_on:
@@ -2002,7 +2045,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "cfg80211.leave_ibss()");
+ netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
priv->connected = false;
memset(priv->bssid, 0, ETH_ALEN);
@@ -2028,8 +2071,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct usbnet *usbdev = priv->usbdev;
__le32 flags;
- devdbg(usbdev, "rndis_add_key(%i, %pM, %08x)", key_index, mac_addr,
- params->cipher);
+ netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n",
+ __func__, key_index, mac_addr, params->cipher);
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -2050,8 +2093,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
key_index, mac_addr, params->seq,
params->seq_len, params->cipher, flags);
default:
- devdbg(usbdev, "rndis_add_key: unsupported cipher %08x",
- params->cipher);
+ netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n",
+ __func__, params->cipher);
return -ENOTSUPP;
}
}
@@ -2062,7 +2105,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "rndis_del_key(%i, %pM)", key_index, mac_addr);
+ netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr);
return remove_key(usbdev, key_index, mac_addr);
}
@@ -2074,7 +2117,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
struct usbnet *usbdev = priv->usbdev;
struct rndis_wlan_encr_key key;
- devdbg(usbdev, "rndis_set_default_key(%i)", key_index);
+ netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
priv->encr_tx_key_index = key_index;
@@ -2188,7 +2231,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
if (ret < 0)
memset(bssid, 0, sizeof(bssid));
- devdbg(usbdev, "link up work: [%pM] %s", bssid, roamed ? "roamed" : "");
+ netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
+ bssid, roamed ? " roamed" : "");
/* Internal bss list in device always contains at least the currently
* connected bss and we can get it to cfg80211 with
@@ -2270,8 +2314,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
/* must have at least one array entry */
if (len < offsetof(struct ndis_80211_status_indication, u) +
sizeof(struct ndis_80211_auth_request)) {
- devinfo(usbdev, "authentication indication: "
- "too short message (%i)", len);
+ netdev_info(usbdev->net, "authentication indication: too short message (%i)\n",
+ len);
return;
}
@@ -2298,8 +2342,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
type = "group_error";
}
- devinfo(usbdev, "authentication indication: %s (0x%08x)", type,
- le32_to_cpu(auth_req->flags));
+ netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n",
+ type, le32_to_cpu(auth_req->flags));
if (pairwise_error) {
key_type = NL80211_KEYTYPE_PAIRWISE;
@@ -2335,8 +2379,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
if (len < offsetof(struct ndis_80211_status_indication, u) +
sizeof(struct ndis_80211_pmkid_cand_list)) {
- devinfo(usbdev, "pmkid candidate list indication: "
- "too short message (%i)", len);
+ netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n",
+ len);
return;
}
@@ -2346,18 +2390,16 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
offsetof(struct ndis_80211_status_indication, u);
if (len < expected_len) {
- devinfo(usbdev, "pmkid candidate list indication: "
- "list larger than buffer (%i < %i)",
- len, expected_len);
+ netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n",
+ len, expected_len);
return;
}
cand_list = &indication->u.cand_list;
- devinfo(usbdev, "pmkid candidate list indication: "
- "version %i, candidates %i",
- le32_to_cpu(cand_list->version),
- le32_to_cpu(cand_list->num_candidates));
+ netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n",
+ le32_to_cpu(cand_list->version),
+ le32_to_cpu(cand_list->num_candidates));
if (le32_to_cpu(cand_list->version) != 1)
return;
@@ -2366,8 +2408,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
- devdbg(usbdev, "cand[%i]: flags: 0x%08x, bssid: %pM",
- i, le32_to_cpu(cand->flags), cand->bssid);
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), cand->bssid);
#if 0
struct iw_pmkid_cand pcand;
@@ -2398,15 +2440,14 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
len = le32_to_cpu(msg->length);
if (len < 8) {
- devinfo(usbdev, "media specific indication, "
- "ignore too short message (%i < 8)", len);
+ netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n",
+ len);
return;
}
if (offset + len > buflen) {
- devinfo(usbdev, "media specific indication, "
- "too large to fit to buffer (%i > %i)",
- offset + len, buflen);
+ netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
+ offset + len, buflen);
return;
}
@@ -2414,13 +2455,13 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
switch (le32_to_cpu(indication->status_type)) {
case NDIS_80211_STATUSTYPE_RADIOSTATE:
- devinfo(usbdev, "radio state indication: %i",
- le32_to_cpu(indication->u.radio_status));
+ netdev_info(usbdev->net, "radio state indication: %i\n",
+ le32_to_cpu(indication->u.radio_status));
return;
case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE:
- devinfo(usbdev, "media stream mode indication: %i",
- le32_to_cpu(indication->u.media_stream_mode));
+ netdev_info(usbdev->net, "media stream mode indication: %i\n",
+ le32_to_cpu(indication->u.media_stream_mode));
return;
case NDIS_80211_STATUSTYPE_AUTHENTICATION:
@@ -2432,9 +2473,8 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
return;
default:
- devinfo(usbdev, "media specific indication: "
- "unknown status type 0x%08x",
- le32_to_cpu(indication->status_type));
+ netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n",
+ le32_to_cpu(indication->status_type));
}
}
@@ -2451,14 +2491,13 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
* and userspace to think that device is
* roaming/reassociating when it isn't.
*/
- devdbg(usbdev, "ignored OID_802_11_ADD_KEY triggered "
- "'media connect'");
+ netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n");
return;
}
usbnet_pause_rx(usbdev);
- devinfo(usbdev, "media connect");
+ netdev_info(usbdev->net, "media connect\n");
/* queue work to avoid recursive calls into rndis_command */
set_bit(WORK_LINK_UP, &priv->work_pending);
@@ -2466,7 +2505,7 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
break;
case RNDIS_STATUS_MEDIA_DISCONNECT:
- devinfo(usbdev, "media disconnect");
+ netdev_info(usbdev->net, "media disconnect\n");
/* queue work to avoid recursive calls into rndis_command */
set_bit(WORK_LINK_DOWN, &priv->work_pending);
@@ -2478,8 +2517,8 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
break;
default:
- devinfo(usbdev, "indication: 0x%08x",
- le32_to_cpu(msg->status));
+ netdev_info(usbdev->net, "indication: 0x%08x\n",
+ le32_to_cpu(msg->status));
break;
}
}
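The hunks above move the rndis_wlan messages from the usbnet devdbg()/devinfo()/devwarn() helpers to the generic netdev_dbg()/netdev_info()/netdev_warn() macros: each call now takes the net_device, carries an explicit trailing "\n", and most debug strings use __func__ instead of a hard-coded function name. A minimal user-space sketch of that convention follows; sketch_dbg and the device name are made up for illustration and are not part of the patch.

/* Illustrative sketch only: the message style the conversion moves to --
 * device-name prefix, explicit "\n", and __func__ rather than a literal
 * function name that can go stale after a rename.
 */
#include <stdio.h>

#define sketch_dbg(name, fmt, ...) \
	fprintf(stderr, "%s: " fmt, (name), ##__VA_ARGS__)

int main(void)
{
	const char *netdev_name = "wlan0";	/* stands in for usbdev->net */
	int key_index = 1;

	sketch_dbg(netdev_name, "%s(%i)\n", __func__, key_index);
	return 0;
}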
@@ -2544,8 +2583,8 @@ static void rndis_device_poller(struct work_struct *work)
if (ret == 0)
priv->last_qual = level_to_qual(le32_to_cpu(rssi));
- devdbg(usbdev, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d",
- ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
+ netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
+ ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
/* Workaround transfer stalls on poor quality links.
* TODO: find right way to fix these stalls (as stalls do not happen
@@ -2594,23 +2633,9 @@ end:
/*
* driver/device initialization
*/
-static int bcm4320a_early_init(struct usbnet *usbdev)
-{
- /* bcm4320a doesn't handle configuration parameters well. Try
- * set any and you get partially zeroed mac and broken device.
- */
-
- return 0;
-}
-
-static int bcm4320b_early_init(struct usbnet *usbdev)
+static void rndis_copy_module_params(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- char buf[8];
-
- /* Early initialization settings, setting these won't have effect
- * if called after generic_rndis_bind().
- */
priv->param_country[0] = modparam_country[0];
priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2677,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
priv->param_workaround_interval = 500;
else
priv->param_workaround_interval = modparam_workaround_interval;
+}
+
+static int bcm4320a_early_init(struct usbnet *usbdev)
+{
+ /* copy module parameters for bcm4320a so that iwconfig reports txpower
+ * and workaround parameter is copied to private structure correctly.
+ */
+ rndis_copy_module_params(usbdev);
+
+ /* bcm4320a doesn't handle configuration parameters well. Try
+ * set any and you get partially zeroed mac and broken device.
+ */
+
+ return 0;
+}
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ char buf[8];
+
+ rndis_copy_module_params(usbdev);
+
+ /* Early initialization settings, setting these won't have effect
+ * if called after generic_rndis_bind().
+ */
rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
rndis_set_config_parameter_str(usbdev, "FrameBursting",
@@ -2826,11 +2877,11 @@ static int rndis_wlan_reset(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int retval;
- devdbg(usbdev, "rndis_wlan_reset");
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
retval = rndis_reset(usbdev);
if (retval)
- devwarn(usbdev, "rndis_reset() failed: %d", retval);
+ netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval);
/* rndis_reset cleared multicast list, so restore here.
(set_multicast_list() also turns on current packet filter) */
@@ -2848,7 +2899,7 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
int retval;
__le32 filter;
- devdbg(usbdev, "rndis_wlan_stop");
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
retval = disassociate(usbdev, false);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaabb..5239e082cd0f 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,17 +54,17 @@ config RT61PCI
When compiled as a module, this driver will be called rt61pci.
config RT2800PCI_PCI
- tristate
+ boolean
depends on PCI
default y
config RT2800PCI_SOC
- tristate
+ boolean
depends on RALINK_RT288X || RALINK_RT305X
default y
config RT2800PCI
- tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)"
+ tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)"
depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
select RT2800_LIB
select RT2X00_LIB_PCI if RT2800PCI_PCI
@@ -75,7 +75,7 @@ config RT2800PCI
select CRC_CCITT
select EEPROM_93CX6
---help---
- This adds support for rt2800 wireless chipset family.
+ This adds support for rt2800/rt3000/rt3500 wireless chipset family.
Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
This driver is non-functional at the moment and is intended for
@@ -83,6 +83,32 @@ config RT2800PCI
When compiled as a module, this driver will be called "rt2800pci.ko".
+if RT2800PCI
+
+config RT2800PCI_RT30XX
+ bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
+ default n
+ ---help---
+ This adds support for rt30xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3090, RT3091 & RT3092
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800PCI_RT35XX
+ bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices"
+ default n
+ ---help---
+ This adds support for rt35xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3060, RT3062, RT3562, RT3592
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+endif
+
config RT2500USB
tristate "Ralink rt2500 (USB) support"
depends on USB
@@ -126,6 +152,43 @@ config RT2800USB
When compiled as a module, this driver will be called "rt2800usb.ko".
+if RT2800USB
+
+config RT2800USB_RT30XX
+ bool "rt2800usb - Include support for rt30xx (USB) devices"
+ default n
+ ---help---
+ This adds support for rt30xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT3070, RT3071 & RT3072
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800USB_RT35XX
+ bool "rt2800usb - Include support for rt35xx (USB) devices"
+ default n
+ ---help---
+ This adds support for rt35xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT3572
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800USB_UNKNOWN
+ bool "rt2800usb - Include support for unknown (USB) devices"
+ default n
+ ---help---
+ This adds support for rt2800 family devices that are known to
+ have a rt2800 family chipset, but for which the exact chipset
+ is unknown.
+
+ Support status for these devices is unknown, and enabling these
+ devices may or may not work.
+
+endif
+
config RT2800_LIB
tristate
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a418..5f5204b82891 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00pci.h"
@@ -451,7 +452,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* RF2420 chipset don't need any additional actions.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2420))
+ if (rt2x00_rf(rt2x00dev, RF2420))
return;
/*
@@ -1340,11 +1341,10 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, RT2460, value,
+ rt2x00_get_field32(reg, CSR0_REVISION));
- if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
+ if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1562,7 +1562,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2400pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2400pci_get_tsf,
.tx_last_beacon = rt2400pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1643,7 +1642,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
/*
* RT2400pci module information.
*/
-static struct pci_device_id rt2400pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c3dea697b907..c048b18f4133 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -65,6 +65,7 @@
* CSR0: ASIC revision number.
*/
#define CSR0 0x0000
+#define CSR0_REVISION FIELD32(0x0000ffff)
/*
* CSR1: System control register.
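Both rt2400pci.h and rt2500pci.h gain a CSR0_REVISION field covering the low 16 bits of CSR0, and the probe paths now pass rt2x00_get_field32(reg, CSR0_REVISION) to rt2x00_set_chip() instead of the raw register value. Assuming the usual mask-and-shift semantics behind FIELD32()/rt2x00_get_field32() (not spelled out in these hunks), a stand-alone sketch of the extraction:

/* Sketch, not part of the patch: extract a register field defined by a
 * contiguous bit mask by masking and shifting down to bit 0.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
	unsigned int shift = 0;

	while (!(mask & (1u << shift)))	/* find the lowest set bit of the mask */
		shift++;
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t csr0 = 0x12340005;	/* hypothetical CSR0 readback */

	printf("chipset: 0x%04x, revision: 0x%04x\n",
	       get_field32(csr0, 0xffff0000), get_field32(csr0, 0x0000ffff));
	return 0;
}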
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f5..2a73f593aab0 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00pci.h"
@@ -440,8 +441,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +449,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +475,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch on tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523))
+ if (!rt2x00_rf(rt2x00dev, RF2523))
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
/*
* For RT2525 we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ if (rt2x00_rf(rt2x00dev, RF2525)) {
static const u32 vals[] = {
0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +516,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch off tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ if (!rt2x00_rf(rt2x00dev, RF2523)) {
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
}
@@ -640,7 +640,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* up to version C the link tuning should halt after 20
* seconds while being associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
rt2x00dev->intf_associated && count > 20)
return;
@@ -650,7 +650,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* should go straight to dynamic CCA tuning when they
* are not associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D ||
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
!rt2x00dev->intf_associated)
goto dynamic_cca_tune;
@@ -1504,15 +1504,15 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ rt2x00_set_chip(rt2x00dev, RT2560, value,
+ rt2x00_get_field32(reg, CSR0_REVISION));
+
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1744,22 +1744,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1860,7 +1860,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2500pci_get_tsf,
.tx_last_beacon = rt2500pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1941,7 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
/*
* RT2500pci module information.
*/
-static struct pci_device_id rt2500pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index c6bd1fcae7eb..d708031361ac 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -76,6 +76,7 @@
* CSR0: ASIC revision number.
*/
#define CSR0 0x0000
+#define CSR0_REVISION FIELD32(0x0000ffff)
/*
* CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59de..8ebb705fe106 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include "rt2x00.h"
@@ -368,7 +369,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
/*
* The encryption key doesn't fit within the CSR cache,
- * this means we should allocate it seperately and use
+ * this means we should allocate it separately and use
* rt2x00usb_vendor_request() to send the key to the hardware.
*/
reg = KEY_ENTRY(key->hw_key_idx);
@@ -382,7 +383,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
/*
* The driver does not support the IV/EIV generation
* in hardware. However it demands the data to be provided
- * both seperately as well as inside the frame.
+ * both separately as well as inside the frame.
* We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
* to ensure rt2x00lib will not strip the data from the
* frame after the copy, now we must tell mac80211
@@ -565,8 +566,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +574,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +598,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* For RT2525E we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E)) {
static const u32 vals[] = {
0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +793,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
+ if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
} else {
@@ -1409,21 +1409,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1647,6 +1644,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -1667,22 +1669,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1763,7 +1765,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c5fe867665e6..74c0433dba37 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -60,11 +60,11 @@
/*
* Chipset version.
*/
-#define RT2860C_VERSION 0x28600100
-#define RT2860D_VERSION 0x28600101
-#define RT2880E_VERSION 0x28720200
-#define RT2883_VERSION 0x28830300
-#define RT3070_VERSION 0x30700200
+#define RT2860C_VERSION 0x0100
+#define RT2860D_VERSION 0x0101
+#define RT2880E_VERSION 0x0200
+#define RT2883_VERSION 0x0300
+#define RT3070_VERSION 0x0200
/*
* Signal information.
@@ -408,8 +408,8 @@
* ASIC_VER: 2860 or 2870
*/
#define MAC_CSR0 0x1000
-#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
-#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
+#define MAC_CSR0_REVISION FIELD32(0x0000ffff)
+#define MAC_CSR0_CHIPSET FIELD32(0xffff0000)
/*
* MAC_SYS_CTRL:
@@ -1323,7 +1323,7 @@
#define PAIRWISE_KEY_ENTRY(__idx) \
( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define MAC_IVEIV_ENTRY(__idx) \
- ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
+ ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index eb1e1d00bec3..c015ce9fdd09 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -35,11 +35,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "rt2x00.h"
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
+#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
+#include "rt2x00pci.h"
+#endif
#include "rt2800lib.h"
#include "rt2800.h"
#include "rt2800usb.h"
@@ -89,7 +93,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -118,7 +122,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -218,10 +222,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
u32 reg;
/*
- * RT2880 and RT3052 don't support MCU requests.
+ * SOC devices don't support MCU requests.
*/
- if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
- rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (rt2x00_is_soc(rt2x00dev))
return;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +249,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+
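The new rt2800_wait_wpdma_ready() consolidates the identical WPDMA busy-wait loops that the PCI and USB variants each carried (both private copies are removed further down in this patch). A user-space sketch of the same bounded-poll pattern; the names and the fake "register read" are illustrative only:

/* Sketch, not part of the patch: re-read a busy flag up to a fixed number
 * of times, sleeping briefly between attempts, and fail once the retry
 * budget is exhausted.
 */
#include <stdio.h>
#include <unistd.h>

#define POLL_BUSY_COUNT 100

static int dma_busy_reads;		/* stands in for the hardware status */

static int read_dma_busy(void)
{
	return dma_busy_reads-- > 0;	/* busy for the first few reads */
}

static int wait_dma_ready(void)
{
	for (int i = 0; i < POLL_BUSY_COUNT; i++) {
		if (!read_dma_busy())
			return 0;
		usleep(1000);		/* roughly msleep(1) in the driver */
	}
	fprintf(stderr, "DMA busy, aborting\n");
	return -1;
}

int main(void)
{
	dma_busy_reads = 5;
	return wait_dma_ready();
}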
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -340,7 +362,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
@@ -348,7 +370,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
return 0;
}
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
@@ -357,7 +379,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
-EXPORT_SYMBOL_GPL(rt2800_init_led);
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
@@ -643,7 +664,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
switch ((int)ant->tx) {
case 1:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
@@ -792,9 +813,9 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 24,
rt2x00dev->calibration[conf_is_ht40(conf)]);
- rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
- rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+ rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -806,12 +827,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
- rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
- (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3021) ||
- rt2x00_rf(&rt2x00dev->chip, RF3022)))
+ if ((rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090)) &&
+ (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)))
rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +899,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1040,8 +1062,9 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1095,8 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
return;
/*
@@ -1092,7 +1116,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
u32 reg;
unsigned int i;
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
/*
* Wait until BBP and RF are ready.
*/
@@ -1111,7 +1135,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
reg & ~0x00002000);
- } else if (rt2x00_intf_is_pci(rt2x00dev))
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -1119,9 +1143,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -1157,8 +1181,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1210,14 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
- rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+ if ((rt2x00_rt(rt2x00dev, RT2872) &&
+ (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
+ rt2x00_rt(rt2x00dev, RT2880) ||
+ rt2x00_rt(rt2x00dev, RT2883) ||
+ rt2x00_rt(rt2x00dev, RT2890) ||
+ rt2x00_rt(rt2x00dev, RT3052) ||
+ (rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1276,7 +1307,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
@@ -1336,7 +1367,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
@@ -1465,22 +1496,25 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0x00);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
}
- if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 84, 0x99);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
}
- if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+ if (rt2x00_rt(rt2x00dev, RT3052)) {
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 78, 0x0e);
rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1565,14 +1599,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
u8 rfcsr;
u8 bbp;
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
return 0;
- if (rt2x00_intf_is_pci(rt2x00dev)) {
- if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ if (!rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022))
return 0;
}
@@ -1586,7 +1621,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
@@ -1607,7 +1642,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_intf_is_pci(rt2x00dev)) {
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1737,7 +1772,12 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
- } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+ } else if (rt2x00_rt(rt2x00dev, RT2860) ||
+ rt2x00_rt(rt2x00dev, RT2870) ||
+ rt2x00_rt(rt2x00dev, RT2872) ||
+ rt2x00_rt(rt2x00dev, RT2880) ||
+ (rt2x00_rt(rt2x00dev, RT2883) &&
+ (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1836,36 +1876,34 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
-
- if (rt2x00_intf_is_usb(rt2x00dev)) {
- struct rt2x00_chip *chip = &rt2x00dev->chip;
-
- /*
- * The check for rt2860 is not a typo, some rt2870 hardware
- * identifies itself as rt2860 in the CSR register.
- */
- if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
- rt2x00_set_chip_rt(rt2x00dev, RT2870);
- } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
- rt2x00_set_chip_rt(rt2x00dev, RT3070);
- } else {
- ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
- return -ENODEV;
- }
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
+
+ if (!rt2x00_rt(rt2x00dev, RT2860) &&
+ !rt2x00_rt(rt2x00dev, RT2870) &&
+ !rt2x00_rt(rt2x00dev, RT2872) &&
+ !rt2x00_rt(rt2x00dev, RT2880) &&
+ !rt2x00_rt(rt2x00dev, RT2883) &&
+ !rt2x00_rt(rt2x00dev, RT2890) &&
+ !rt2x00_rt(rt2x00dev, RT3052) &&
+ !rt2x00_rt(rt2x00dev, RT3070) &&
+ !rt2x00_rt(rt2x00dev, RT3071) &&
+ !rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2x00_rt(rt2x00dev, RT3572)) {
+ ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+ return -ENODEV;
}
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+
+ if (!rt2x00_rf(rt2x00dev, RF2820) &&
+ !rt2x00_rf(rt2x00dev, RF2850) &&
+ !rt2x00_rf(rt2x00dev, RF2720) &&
+ !rt2x00_rf(rt2x00dev, RF2750) &&
+ !rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF2020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022) &&
+ !rt2x00_rf(rt2x00dev, RF3052)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2013,7 +2051,6 @@ static const struct rf_channel rf_vals_302x[] = {
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
- struct rt2x00_chip *chip = &rt2x00dev->chip;
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power1;
@@ -2022,6 +2059,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u16 eeprom;
/*
+ * Disable powersaving as default on PCI devices.
+ */
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -2043,19 +2086,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(chip, RF2820) ||
- rt2x00_rf(chip, RF2720) ||
- (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
+ if (rt2x00_rf(rt2x00dev, RF2820) ||
+ rt2x00_rf(rt2x00dev, RF2720) ||
+ rt2x00_rf(rt2x00dev, RF3052)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF3020) ||
- rt2x00_rf(chip, RF2020) ||
- rt2x00_rf(chip, RF3021) ||
- rt2x00_rf(chip, RF3022)) {
+ } else if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)) {
spec->num_channels = ARRAY_SIZE(rf_vals_302x);
spec->channels = rf_vals_302x;
}
@@ -2063,7 +2106,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize HT information.
*/
- if (!rt2x00_rf(chip, RF2020))
+ if (!rt2x00_rf(rt2x00dev, RF2020))
spec->ht.ht_supported = true;
else
spec->ht.ht_supported = false;
@@ -2074,8 +2117,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_RX_STBC |
- IEEE80211_HT_CAP_PSMP_SUPPORT;
+ IEEE80211_HT_CAP_RX_STBC;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@@ -2140,8 +2182,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
- memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
- memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
+ memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+ memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
}
static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
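Two related fixes touch the IV/EIV handling in this file: MAC_IVEIV_ENTRY() now multiplies the index by the entry size instead of AND-ing it (the bitwise AND yielded a wrong offset for most indices), and rt2800_get_tkip_seq() copies the stored bytes out into the caller's iv16/iv32 using sizeof(*ptr), rather than writing into the just-read entry. A sketch of the corrected read path; the table layout below is invented for illustration:

/* Sketch, not part of the patch: per-index entry offset is
 * base + index * sizeof(entry), and the IV/EIV bytes are copied *out*
 * to the caller, sized by the destination objects.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct iveiv_entry {
	uint8_t iv[8];			/* iv16 in iv[0..1], iv32 in iv[4..7] */
};

static struct iveiv_entry table[4];	/* stands in for the MAC_IVEIV table */

static void get_tkip_seq(unsigned int idx, uint16_t *iv16, uint32_t *iv32)
{
	const uint8_t *base = (const uint8_t *)table;
	const struct iveiv_entry *e =
		(const struct iveiv_entry *)(base + idx * sizeof(*e));

	memcpy(iv16, &e->iv[0], sizeof(*iv16));
	memcpy(iv32, &e->iv[4], sizeof(*iv32));
}

int main(void)
{
	uint16_t iv16;
	uint32_t iv32;

	table[2].iv[0] = 0x34;		/* pretend hardware stored a sequence */
	table[2].iv[1] = 0x12;
	table[2].iv[4] = 0x78;

	get_tkip_seq(2, &iv16, &iv32);
	printf("iv16=0x%04x iv32=0x%08x\n", iv16, iv32);
	return 0;
}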
@@ -2277,7 +2319,6 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.set_rts_threshold = rt2800_set_rts_threshold,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac8..ebabeae62d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_led *led, enum led_type type);
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44d..91cce2d0f6db 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
#include "rt2800.h"
#include "rt2800pci.h"
-#ifdef CONFIG_RT2800PCI_PCI_MODULE
-#define CONFIG_RT2800PCI_PCI
-#endif
-
-#ifdef CONFIG_RT2800PCI_WISOC_MODULE
-#define CONFIG_RT2800PCI_WISOC
-#endif
-
/*
* Allow hardware encryption to be disabled.
*/
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
-static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
- rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
- rt2800pci_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
__le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+ if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
* Remove TXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, RXWI_DESC_SIZE);
- skb_trim(entry->skb, rxdesc->size);
}
/*
@@ -1071,18 +1041,12 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read EEPROM into buffer
*/
- switch (rt2x00dev->chip.rt) {
- case RT2880:
- case RT3052:
+ if (rt2x00_is_soc(rt2x00dev))
rt2800pci_read_eeprom_soc(rt2x00dev);
- break;
- default:
- if (rt2800pci_efuse_detect(rt2x00dev))
- rt2800pci_read_eeprom_efuse(rt2x00dev);
- else
- rt2800pci_read_eeprom_pci(rt2x00dev);
- break;
- }
+ else if (rt2800pci_efuse_detect(rt2x00dev))
+ rt2800pci_read_eeprom_efuse(rt2x00dev);
+ else
+ rt2800pci_read_eeprom_pci(rt2x00dev);
return rt2800_validate_eeprom(rt2x00dev);
}
@@ -1133,8 +1097,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* This device requires firmware.
*/
- if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
- !rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (!rt2x00_is_soc(rt2x00dev))
__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,8 +1184,11 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-static struct pci_device_id rt2800pci_device_table[] = {
- { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
+ { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1230,18 +1196,19 @@ static struct pci_device_id rt2800pci_device_table[] = {
{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#ifdef CONFIG_RT2800PCI_RT30XX
{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
+#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
{ 0, }
};
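The regrouped PCI ID table shows how the Kconfig bools added earlier in this patch take effect: entries for rt30xx and rt35xx parts are only compiled in when CONFIG_RT2800PCI_RT30XX or CONFIG_RT2800PCI_RT35XX is set. A stand-alone sketch of the same compile-time gating; sketch_id and the build flag usage are illustrative, not the driver's real definitions:

/* Sketch, not part of the patch: a device table whose optional entries are
 * included only when the corresponding CONFIG_* symbol is defined by the
 * build system.
 */
#include <stdio.h>

struct sketch_id { unsigned short vendor, device; };

static const struct sketch_id sketch_table[] = {
	{ 0x1814, 0x0601 },
#ifdef CONFIG_RT2800PCI_RT30XX
	{ 0x1814, 0x3090 },		/* built only when the option is set */
#endif
	{ 0, 0 }
};

int main(void)
{
	const struct sketch_id *id;

	for (id = sketch_table; id->vendor; id++)
		printf("%04x:%04x\n", id->vendor, id->device);
	return 0;
}

Compiling the sketch with -DCONFIG_RT2800PCI_RT30XX makes the extra entry appear, mirroring how enabling the Kconfig option extends the real table.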
@@ -1255,12 +1222,11 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
-#ifdef CONFIG_RT2800PCI_WISOC
-#if defined(CONFIG_RALINK_RT288X)
-__rt2x00soc_probe(RT2880, &rt2800pci_ops);
-#elif defined(CONFIG_RALINK_RT305X)
-__rt2x00soc_probe(RT3052, &rt2800pci_ops);
-#endif
+#ifdef CONFIG_RT2800PCI_SOC
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+ return rt2x00soc_probe(pdev, &rt2800pci_ops);
+}
static struct platform_driver rt2800soc_driver = {
.driver = {
@@ -1268,12 +1234,12 @@ static struct platform_driver rt2800soc_driver = {
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
- .probe = __rt2x00soc_probe,
+ .probe = rt2800soc_probe,
.remove = __devexit_p(rt2x00soc_remove),
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1256,7 @@ static int __init rt2800pci_init(void)
{
int ret = 0;
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
@@ -1298,7 +1264,7 @@ static int __init rt2800pci_init(void)
#ifdef CONFIG_RT2800PCI_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@@ -1313,7 +1279,7 @@ static void __exit rt2800pci_exit(void)
#ifdef CONFIG_RT2800PCI_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
}
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index af85d18cdbe7..d27d7d5d850c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,6 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
size_t offset = 0;
/*
@@ -100,7 +99,7 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
* There are 2 variations of the rt2870 firmware.
* a) size: 4kb
* b) size: 8kb
- * Note that (b) contains 2 seperate firmware blobs of 4k
+ * Note that (b) contains 2 separate firmware blobs of 4k
* within the file. The first blob is the same firmware as (a),
* but the second blob is for the additional chipsets.
*/
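Per the comment above, an 8 kB rt2870 image is really two independent 4 kB blobs, so validation walks the file in 4096-byte steps and checks each blob on its own (the while loop in the next hunk). A sketch of that chunked check; the xor checksum is a placeholder for the driver's CRC, and all names here are invented:

/* Sketch, not part of the patch: accept only 4 KiB or 8 KiB images and
 * validate them one 4 KiB blob at a time.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool blob_ok(const uint8_t *data, size_t len)
{
	uint8_t sum = 0;

	for (size_t i = 0; i < len; i++)	/* placeholder integrity check */
		sum ^= data[i];
	return sum == 0;
}

static bool firmware_ok(const uint8_t *data, size_t len)
{
	if (len != 4096 && len != 8192)
		return false;
	for (size_t offset = 0; offset < len; offset += 4096)
		if (!blob_ok(data + offset, 4096))
			return false;
	return true;
}

int main(void)
{
	static uint8_t fw[8192];		/* all zeroes -> xor sum is 0 */

	printf("firmware %s\n", firmware_ok(fw, sizeof(fw)) ? "ok" : "bad");
	return 0;
}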
@@ -111,14 +110,14 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
* Check if we need the upper 4kb firmware data or not.
*/
if ((len == 4096) &&
- (chipset != 0x2860) &&
- (chipset != 0x2872) &&
- (chipset != 0x3070))
+ !rt2x00_rt(rt2x00dev, RT2860) &&
+ !rt2x00_rt(rt2x00dev, RT2872) &&
+ !rt2x00_rt(rt2x00dev, RT3070))
return FW_BAD_VERSION;
/*
* 8kb firmware files must be checked as if it were
- * 2 seperate firmware files.
+ * 2 separate firmware files.
*/
while (offset < len) {
if (!rt2800usb_check_crc(data + offset, 4096))
@@ -138,14 +137,13 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
u32 reg;
u32 offset;
u32 length;
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
/*
* Check which section of the firmware we need.
*/
- if ((chipset == 0x2860) ||
- (chipset == 0x2872) ||
- (chipset == 0x3070)) {
+ if (rt2x00_rt(rt2x00dev, RT2860) ||
+ rt2x00_rt(rt2x00dev, RT2872) ||
+ rt2x00_rt(rt2x00dev, RT3070)) {
offset = 0;
length = 4096;
} else {
@@ -200,9 +198,9 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
*/
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
- if ((chipset == 0x3070) ||
- (chipset == 0x3071) ||
- (chipset == 0x3572)) {
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3572)) {
udelay(200);
rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
udelay(10);
@@ -248,24 +246,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
-static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -274,7 +254,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +275,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
- /* Don't use bulk in aggregation when working with USB 1.1 */
- rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
- (rt2x00dev->rx->usb_maxpacket == 512));
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
/*
* Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +324,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
- rt2800usb_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
@@ -573,41 +551,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- __le32 *rxd = (__le32 *)entry->skb->data;
+ __le32 *rxi = (__le32 *)entry->skb->data;
__le32 *rxwi;
- u32 rxd0;
+ __le32 *rxd;
+ u32 rxi0;
u32 rxwi0;
u32 rxwi1;
u32 rxwi2;
u32 rxwi3;
+ u32 rxd0;
+ int rx_pkt_len;
+
+ /*
+ * RX frame format is :
+ * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
+ * |<------------ rx_pkt_len -------------->|
+ */
+ rt2x00_desc_read(rxi, 0, &rxi0);
+ rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
+
+ rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+
+ /*
+ * FIXME : we need to check for rx_pkt_len validity
+ */
+ rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
/*
* Copy descriptor to the skbdesc->desc buffer, making it safe from
* moving of frame data in rt2x00usb.
*/
- memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
- rxd = (__le32 *)skbdesc->desc;
- rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &rxd0);
rt2x00_desc_read(rxwi, 0, &rxwi0);
rt2x00_desc_read(rxwi, 1, &rxwi1);
rt2x00_desc_read(rxwi, 2, &rxwi2);
rt2x00_desc_read(rxwi, 3, &rxwi3);
+ rt2x00_desc_read(rxd, 0, &rxd0);
- if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
+ if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
+ rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +616,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
+ if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +655,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* Remove RXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, skbdesc->desc_len);
- skb_trim(entry->skb, rxdesc->size);
}
/*
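A standalone sketch of the RX buffer layout handled by the new rt2800usb_fill_rxdone() above: the length field in RXINFO word 0 tells the driver where the DMA area (RXWI plus frame) ends and therefore where the trailing RXD word sits. The program below models only that pointer arithmetic; the buffer contents and sizes are invented for illustration and this is not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RXINFO_SIZE 4u   /* one 32-bit word, matches RXINFO_DESC_SIZE */

/*
 * Modelled layout (see the comment in the hunk above):
 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
 *          |<-------------- rx_pkt_len -------------->|
 */
int main(void)
{
	uint8_t buf[64] = { 0 };
	uint32_t rxi0 = 40;                 /* pretend the DMA area is 40 bytes */

	memcpy(buf, &rxi0, sizeof(rxi0));   /* RXINFO word 0, little-endian host assumed */

	uint32_t word0;
	memcpy(&word0, buf, sizeof(word0));
	uint32_t rx_pkt_len = word0 & 0xffff;        /* RXINFO_W0_USB_DMA_RX_PKT_LEN */

	size_t rxwi_off = RXINFO_SIZE;               /* RXWI directly follows RXINFO */
	size_t rxd_off  = RXINFO_SIZE + rx_pkt_len;  /* RXD follows the DMA area */

	if (rxd_off + 4 > sizeof(buf))               /* the FIXME above: validate the length */
		return 1;

	printf("rxwi at offset %zu, rxd at offset %zu\n", rxwi_off, rxd_off);
	return 0;
}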
@@ -814,51 +805,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Abocom */
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* AirTies */
- { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Amigo */
- { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Cisco */
- { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -867,156 +834,257 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
+ { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gigabyte */
+ { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Hawking */
+ { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Linksys */
+ { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Motorola */
+ { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* MSI */
+ { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Samsung */
+ { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Siemens */
+ { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sparklan */
+ { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* U-Media*/
+ { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ZCOM */
+ { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zyxel */
+ { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
+#ifdef CONFIG_RT2800USB_RT30XX
+ /* Abocom */
+ { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Conceptronic */
+ { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Corega */
+ { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gigabyte */
+ { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* MSI */
+ { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Quanta */
+ { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
+#ifdef CONFIG_RT2800USB_RT35XX
+ /* Askey */
+ { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
+ { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
+#ifdef CONFIG_RT2800USB_UNKNOWN
+ /*
+ * Unclear what kind of devices these are (they aren't supported by the
+ * vendor driver).
+ */
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Amigo */
+ { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Askey */
+ { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Conceptronic */
+ { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
{ USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
- { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
- { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
/* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
- { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Logitec */
- { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
- { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
- { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ovislink */
{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Para */
{ USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
+ { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Philips */
- { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
- { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Quanta */
- { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Ralink */
- { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Samsung */
- { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Siemens */
- { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
- { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
- { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Sparklan */
- { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* U-Media*/
- { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* ZCOM */
- { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Zinwell */
- { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zyxel */
- { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182ef..d1d8ae94b4d4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -101,6 +103,54 @@
#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
/*
+ * RX Info structure
+ */
+
+/*
+ * Word 0
+ */
+
+#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
+#define RXWI_W0_BSSID FIELD32(0x00001c00)
+#define RXWI_W0_UDF FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define RXWI_W0_TID FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
+#define RXWI_W1_MCS FIELD32(0x007f0000)
+#define RXWI_W1_BW FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
+#define RXWI_W1_STBC FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0 FIELD32(0x000000ff)
+#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
+
+/*
* RX descriptor format for RX Ring.
*/
@@ -115,25 +165,25 @@
* AMSDU: rx with 802.3 header, not 802.11 header.
*/
-#define RXINFO_W0_BA FIELD32(0x00000001)
-#define RXINFO_W0_DATA FIELD32(0x00000002)
-#define RXINFO_W0_NULLDATA FIELD32(0x00000004)
-#define RXINFO_W0_FRAG FIELD32(0x00000008)
-#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXINFO_W0_MULTICAST FIELD32(0x00000020)
-#define RXINFO_W0_BROADCAST FIELD32(0x00000040)
-#define RXINFO_W0_MY_BSS FIELD32(0x00000080)
-#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100)
-#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600)
-#define RXINFO_W0_AMSDU FIELD32(0x00000800)
-#define RXINFO_W0_HTC FIELD32(0x00001000)
-#define RXINFO_W0_RSSI FIELD32(0x00002000)
-#define RXINFO_W0_L2PAD FIELD32(0x00004000)
-#define RXINFO_W0_AMPDU FIELD32(0x00008000)
-#define RXINFO_W0_DECRYPTED FIELD32(0x00010000)
-#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000)
-#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000)
-#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000)
-#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000)
+#define RXD_W0_BA FIELD32(0x00000001)
+#define RXD_W0_DATA FIELD32(0x00000002)
+#define RXD_W0_NULLDATA FIELD32(0x00000004)
+#define RXD_W0_FRAG FIELD32(0x00000008)
+#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W0_MULTICAST FIELD32(0x00000020)
+#define RXD_W0_BROADCAST FIELD32(0x00000040)
+#define RXD_W0_MY_BSS FIELD32(0x00000080)
+#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W0_AMSDU FIELD32(0x00000800)
+#define RXD_W0_HTC FIELD32(0x00001000)
+#define RXD_W0_RSSI FIELD32(0x00002000)
+#define RXD_W0_L2PAD FIELD32(0x00004000)
+#define RXD_W0_AMPDU FIELD32(0x00008000)
+#define RXD_W0_DECRYPTED FIELD32(0x00010000)
+#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
+#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
+#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
+#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 4d841c07c970..d9daa9c406fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
/*
+ * Determine the number of L2 padding bytes required between the header and
+ * the payload.
+ */
+#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
+
+/*
* Determine the alignment requirement,
* to make sure the 802.11 payload is padded to a 4-byte boundary
* we must determine the address of the payload and calculate the
@@ -113,6 +119,12 @@
( ((unsigned long)((__skb)->data + (__header))) & 3 )
/*
+ * Constants for extra TX headroom for alignment purposes.
+ */
+#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
+#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
+
+/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
*/
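A quick check of the L2PAD_SIZE() arithmetic added above: in two's-complement, -(hdrlen) & 3 yields the number of bytes needed to round hdrlen up to the next multiple of 4, and 0 when it already is one; per the comments above, RT2X00_L2PAD_SIZE reserves 8 bytes because both header and payload may need alignment, while RT2X00_ALIGN_SIZE covers whole-frame alignment only. A self-contained demonstration of the macro (plain C, not kernel code):

#include <stdio.h>

/* Same arithmetic as the kernel macro: pad needed to reach a 4-byte boundary. */
#define L2PAD_SIZE(__hdrlen)  (-(__hdrlen) & 3)

int main(void)
{
	unsigned int hdrlen;

	/* 802.11 header lengths of 24..30 bytes need 0..3 bytes of L2 padding. */
	for (hdrlen = 24; hdrlen <= 30; hdrlen++)
		printf("hdrlen %u -> l2pad %u\n", hdrlen, L2PAD_SIZE(hdrlen));

	return 0;
}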
@@ -148,6 +160,7 @@ struct avg_val {
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_USB,
+ RT2X00_CHIP_INTF_SOC,
};
/*
@@ -157,25 +170,26 @@ enum rt2x00_chip_intf {
*/
struct rt2x00_chip {
u16 rt;
-#define RT2460 0x0101
-#define RT2560 0x0201
-#define RT2570 0x1201
-#define RT2561s 0x0301 /* Turbo */
-#define RT2561 0x0302
-#define RT2661 0x0401
-#define RT2571 0x1300
-#define RT2860 0x0601 /* 2.4GHz PCI/CB */
-#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
-#define RT2890 0x0701 /* 2.4GHz PCIe */
-#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
+#define RT2460 0x2460
+#define RT2560 0x2560
+#define RT2570 0x2570
+#define RT2661 0x2661
+#define RT2573 0x2573
+#define RT2860 0x2860 /* 2.4GHz PCI/CB */
+#define RT2870 0x2870
+#define RT2872 0x2872
#define RT2880 0x2880 /* WSOC */
+#define RT2883 0x2883 /* WSOC */
+#define RT2890 0x2890 /* 2.4GHz PCIe */
#define RT3052 0x3052 /* WSOC */
+#define RT3070 0x3070
+#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
-#define RT2870 0x1600
-#define RT3070 0x1800
+#define RT3390 0x3390
+#define RT3572 0x3572
u16 rf;
- u32 rev;
+ u16 rev;
enum rt2x00_chip_intf intf;
};
@@ -905,51 +919,30 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
* Chipset handlers
*/
static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
- const u16 rt, const u16 rf, const u32 rev)
+ const u16 rt, const u16 rf, const u16 rev)
{
rt2x00dev->chip.rt = rt;
rt2x00dev->chip.rf = rf;
rt2x00dev->chip.rev = rev;
-}
-
-static inline void rt2x00_set_chip_rt(struct rt2x00_dev *rt2x00dev,
- const u16 rt)
-{
- rt2x00dev->chip.rt = rt;
-}
-static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
- const u16 rf, const u32 rev)
-{
- rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
-}
-
-static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
-{
INFO(rt2x00dev,
- "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
+ "Chipset detected - rt: %04x, rf: %04x, rev: %04x.\n",
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
-{
- return (chipset->rt == chip);
-}
-
-static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
- return (chipset->rf == chip);
+ return (rt2x00dev->chip.rt == rt);
}
-static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
+static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
- return chipset->rev;
+ return (rt2x00dev->chip.rf == rf);
}
-static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
- const u32 mask, const u32 rev)
+static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
{
- return ((chipset->rev & mask) == rev);
+ return rt2x00dev->chip.rev;
}
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
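For context on the helper changes above: the old chip.rev was a 32-bit value with the RT chip ID packed into its upper half, which is why the removed rt2800usb code earlier in this diff used (rt2x00_rev(...) >> 16) & 0xffff; the new layout keeps rt and rev as separate u16 fields. A small illustration with an invented register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Old scheme: one 32-bit word, chip ID in the top 16 bits. */
	uint32_t old_rev = 0x28600102;                 /* hypothetical packed value */
	unsigned int chipset = (old_rev >> 16) & 0xffff;

	/* New scheme: chip ID and revision stored separately as u16. */
	unsigned int rt  = 0x2860;
	unsigned int rev = 0x0102;

	printf("old: chipset %04x   new: rt %04x rev %04x\n", chipset, rt, rev);
	return 0;
}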
@@ -958,20 +951,25 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.intf = intf;
}
-static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
- return (chipset->intf == intf);
+ return (rt2x00dev->chip.intf == intf);
+}
+
+static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
}
-static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
+static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
}
-static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
+static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
/**
@@ -1013,9 +1011,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -1032,8 +1030,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7d323a763b54..9569fb4e5bc5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include "rt2x00.h"
@@ -109,7 +110,7 @@ struct rt2x00debug_intf {
/*
* HW crypto statistics.
- * All statistics are stored seperately per cipher type.
+ * All statistics are stored separately per cipher type.
*/
struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
@@ -184,7 +185,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr->data_length = cpu_to_le32(skb->len);
dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
- dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
+ dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
dump_hdr->queue_index = desc->entry->queue->qid;
dump_hdr->entry_index = desc->entry->entry_idx;
@@ -573,7 +574,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
blob->data = data;
data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf);
- data += sprintf(data, "revision:\t%08x\n", intf->rt2x00dev->chip.rev);
+ data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
data += sprintf(data, "\n");
data += sprintf(data, "register\tbase\twords\twordsize\n");
data += sprintf(data, "csr\t%d\t%d\t%d\n",
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 06c43ca39bf8..eda73ba735a6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00lib.h"
@@ -385,9 +386,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
- /* Trim buffer to correct size */
- skb_trim(entry->skb, rxdesc.size);
-
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
@@ -397,18 +395,23 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
/*
* Hardware might have stripped the IV/EIV/ICV data,
* in that case it is possible that the data was
- * provided seperately (through hardware descriptor)
+ * provided separately (through hardware descriptor)
* in which case we should reinsert the data into the frame.
*/
if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
(rxdesc.flags & RX_FLAG_IV_STRIPPED))
rt2x00crypto_rx_insert_iv(entry->skb, header_length,
&rxdesc);
- else if (rxdesc.dev_flags & RXDONE_L2PAD)
+ else if (header_length &&
+ (rxdesc.size > header_length) &&
+ (rxdesc.dev_flags & RXDONE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
else
rt2x00queue_align_payload(entry->skb, header_length);
+ /* Trim buffer to correct size */
+ skb_trim(entry->skb, rxdesc.size);
+
/*
* Check if the frame was received using HT. In that case,
* the rate is the MCS index and should be passed to mac80211
@@ -686,7 +689,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Initialize extra TX headroom required.
*/
- rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+ rt2x00dev->hw->extra_tx_headroom =
+ max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
+ rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Take TX headroom required for alignment into account.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
+ else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
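The headroom setup above first raises the driver's descriptor headroom to at least IEEE80211_TX_STATUS_HEADROOM, then adds room for the alignment moves done later in rt2x00queue: RT2X00_L2PAD_SIZE (8) when the driver uses L2 padding, otherwise RT2X00_ALIGN_SIZE (4) when only DMA alignment is required. A standalone sketch of the same computation; the IEEE80211_TX_STATUS_HEADROOM and descriptor values below are assumed for illustration, not taken from mac80211:

#include <stdio.h>

#define IEEE80211_TX_STATUS_HEADROOM  14   /* value assumed for illustration */
#define RT2X00_ALIGN_SIZE              4   /* whole-frame alignment only     */
#define RT2X00_L2PAD_SIZE              8   /* header and payload both move   */

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int desc_headroom = 20;   /* e.g. TXINFO + TXWI, made up here */
	int uses_l2pad = 1;

	unsigned int headroom =
		max_u(IEEE80211_TX_STATUS_HEADROOM, desc_headroom);

	if (uses_l2pad)
		headroom += RT2X00_L2PAD_SIZE;
	else
		headroom += RT2X00_ALIGN_SIZE;

	printf("extra_tx_headroom = %u bytes\n", headroom);
	return 0;
}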
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed8..abbd857ec759 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct queue_entry *entry = NULL;
unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
/*
* We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* increase interface count and start initialization.
*/
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count++;
else
rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
- if (conf->type == NL80211_IFTYPE_AP)
- memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
- memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
+ if (vif->type == NL80211_IFTYPE_AP)
+ memcpy(&intf->bssid, vif->addr, ETH_ALEN);
+ memcpy(&intf->mac, vif->addr, ETH_ALEN);
/*
* The MAC address must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
*/
- rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
/*
* Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
/*
* Don't allow interfaces to be removed while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
* no interface is present.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
- (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
- (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
+ (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
+ (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
return;
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count--;
else
rt2x00dev->intf_sta_count--;
@@ -555,22 +555,6 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
- unsigned int i;
-
- for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
- stats[i].len = rt2x00dev->tx[i].length;
- stats[i].limit = rt2x00dev->tx[i].limit;
- stats[i].count = rt2x00dev->tx[i].count;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats);
-
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e4668..cf3f1c0c4382 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00pci.h"
@@ -41,6 +42,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
@@ -269,7 +273,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
int retval;
- u16 chip;
retval = pci_request_regions(pci_dev, pci_name(pci_dev));
if (retval) {
@@ -312,12 +315,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
- /*
- * Determine RT chipset by reading PCI header.
- */
- pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
- rt2x00_set_chip_rt(rt2x00dev, chip);
-
retval = rt2x00pci_alloc_reg(rt2x00dev);
if (retval)
goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index d4f9449ab0a4..8149ff68410a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -27,6 +27,7 @@
#define RT2X00PCI_H
#include <linux/io.h>
+#include <linux/pci.h>
/*
* This variable should be used with the
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 239afc7a9c0b..a0bd36fc4d2e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -24,6 +24,7 @@
Abstract: rt2x00 queue specific routines.
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
@@ -104,7 +105,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* is also mapped to the DMA so it can be used for transfering
* additional descriptor information to the hardware.
*/
- skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -112,7 +113,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
/*
* Restore data pointer to original location again.
*/
- skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
@@ -134,7 +135,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->hw->extra_tx_headroom,
+ skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
@@ -177,55 +178,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int frame_length = skb->len;
+ unsigned int payload_length = skb->len - header_length;
unsigned int header_align = ALIGN_SIZE(skb, 0);
unsigned int payload_align = ALIGN_SIZE(skb, header_length);
- unsigned int l2pad = 4 - (payload_align - header_align);
+ unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
- if (header_align == payload_align) {
- /*
- * Both header and payload must be moved the same
- * amount of bytes to align them properly. This means
- * we don't use the L2 padding but just move the entire
- * frame.
- */
- rt2x00queue_align_frame(skb);
- } else if (!payload_align) {
- /*
- * Simple L2 padding, only the header needs to be moved,
- * the payload is already properly aligned.
- */
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, frame_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- } else {
- /*
- *
- * Complicated L2 padding, both header and payload need
- * to be moved. By default we only move to the start
- * of the buffer, so our header alignment needs to be
- * increased if there is not enough room for the header
- * to be moved.
- */
- if (payload_align > header_align)
- header_align += 4;
+ /*
+ * Adjust the header alignment if the payload needs to be moved more
+ * than the header.
+ */
+ if (payload_align > header_align)
+ header_align += 4;
+
+ /* There is nothing to do if no alignment is needed */
+ if (!header_align)
+ return;
+
+ /* Reserve the amount of space needed in front of the frame */
+ skb_push(skb, header_align);
+
+ /*
+ * Move the header.
+ */
+ memmove(skb->data, skb->data + header_align, header_length);
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, header_length);
+ /* Move the payload, if present and if required */
+ if (payload_length && payload_align)
memmove(skb->data + header_length + l2pad,
skb->data + header_length + l2pad + payload_align,
- frame_length - header_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
+ payload_length);
+
+ /* Trim the skb to the correct size */
+ skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int l2pad = 4 - (header_length & 3);
+ unsigned int l2pad = L2PAD_SIZE(header_length);
- if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
+ if (!l2pad)
return;
memmove(skb->data + l2pad, skb->data, header_length);
@@ -346,7 +337,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* Header and alignment information.
*/
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
+ (entry->skb->len > txdesc->header_length))
+ txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -387,10 +380,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame.
+ * to be inserted into the frame, except for a frame that has been injected
+ * through a monitor interface. This latter is needed for testing a
+ * monitor interface.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
@@ -502,7 +498,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
/*
* When hardware encryption is supported, and this frame
* is to be encrypted, we should strip the IV/EIV data from
- * the frame so we can provide it to the driver seperately.
+ * the frame so we can provide it to the driver separately.
*/
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1ac..c1e482bb37b3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
* @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
* @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
* mac80211 but was stripped for processing by the driver.
- * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
- * the padded bytes are located between header and payload.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
*/
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
- SKBDESC_L2_PADDED = 1 << 3,
- SKBDESC_NOT_MAC80211 = 1 << 4,
+ SKBDESC_NOT_MAC80211 = 1 << 3,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 19e684f8ffa1..fc98063de71d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00soc.h"
@@ -71,9 +72,7 @@ exit:
return -ENOMEM;
}
-int rt2x00soc_probe(struct platform_device *pdev,
- const unsigned short chipset,
- const struct rt2x00_ops *ops)
+int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
{
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
@@ -94,12 +93,7 @@ int rt2x00soc_probe(struct platform_device *pdev,
rt2x00dev->irq = platform_get_irq(pdev, 0);
rt2x00dev->name = pdev->dev.driver->name;
- /*
- * SoC devices mimic PCI behavior.
- */
- rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
-
- rt2x00_set_chip_rt(rt2x00dev, chipset);
+ rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
retval = rt2x00soc_alloc_reg(rt2x00dev);
if (retval)
@@ -119,6 +113,7 @@ exit_free_device:
return retval;
}
+EXPORT_SYMBOL_GPL(rt2x00soc_probe);
int rt2x00soc_remove(struct platform_device *pdev)
{
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 8a3416624af5..474cbfc1efc7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -26,20 +26,10 @@
#ifndef RT2X00SOC_H
#define RT2X00SOC_H
-#define KSEG1ADDR(__ptr) __ptr
-
-#define __rt2x00soc_probe(__chipset, __ops) \
-static int __rt2x00soc_probe(struct platform_device *pdev) \
-{ \
- return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
-}
-
/*
* SoC driver handlers.
*/
-int rt2x00soc_probe(struct platform_device *pdev,
- const unsigned short chipset,
- const struct rt2x00_ops *ops);
+int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops);
int rt2x00soc_remove(struct platform_device *pdev);
#ifdef CONFIG_PM
int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 0a751e73aa0f..f9a7f8b17411 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 687e17dc2e9f..432e75f960b7 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/eeprom_93cx6.h>
@@ -476,7 +477,7 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
- * to be provided seperately for the descriptor.
+ * to be provided separately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* to generate the IV/EIV data.
@@ -637,8 +638,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF5325));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
/*
* Configure the RX antenna.
@@ -684,8 +684,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF2529));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
!test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
@@ -833,12 +832,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
rt61pci_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ else if (rt2x00_rf(rt2x00dev, RF2529)) {
if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
@@ -879,8 +877,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt61pci_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1135,16 +1132,18 @@ dynamic_cca_tune:
*/
static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
+ u16 chip;
char *fw_name;
- switch (rt2x00dev->chip.rt) {
- case RT2561:
+ pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip);
+ switch (chip) {
+ case RT2561_PCI_ID:
fw_name = FIRMWARE_RT2561;
break;
- case RT2561s:
+ case RT2561s_PCI_ID:
fw_name = FIRMWARE_RT2561s;
break;
- case RT2661:
+ case RT2661_PCI_ID:
fw_name = FIRMWARE_RT2661;
break;
default:
@@ -2299,13 +2298,13 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ if (!rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF5325) &&
+ !rt2x00_rf(rt2x00dev, RF2527) &&
+ !rt2x00_rf(rt2x00dev, RF2529)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2360,7 +2359,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* the antenna settings should be gathered from the NIC
* eeprom word.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
+ if (rt2x00_rf(rt2x00dev, RF2529) &&
!test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2539,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -2566,8 +2570,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_seq;
}
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325)) {
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_seq);
}
@@ -2730,7 +2733,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt61pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt61pci_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2807,7 +2809,7 @@ static const struct rt2x00_ops rt61pci_ops = {
/*
* RT61pci module information.
*/
-static struct pci_device_id rt61pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
/* RT2561s */
{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
/* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f13810622bd..df80f1af22a4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -28,6 +28,13 @@
#define RT61PCI_H
/*
+ * RT chip PCI IDs.
+ */
+#define RT2561s_PCI_ID 0x0301
+#define RT2561_PCI_ID 0x0302
+#define RT2661_PCI_ID 0x0401
+
+/*
* RF chip defines.
*/
#define RF5225 0x0001
@@ -225,6 +232,8 @@ struct hw_pairwise_ta_entry {
* MAC_CSR0: ASIC revision number.
*/
#define MAC_CSR0 0x3000
+#define MAC_CSR0_REVISION FIELD32(0x0000000f)
+#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
/*
* MAC_CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e16..bb58d797fb72 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include "rt2x00.h"
@@ -136,8 +137,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
* all others contain 20 bits.
*/
rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
- 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527)));
+ 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
+ rt2x00_rf(rt2x00dev, RF2527)));
rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
@@ -339,7 +340,7 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
- * to be provided seperately for the descriptor.
+ * to be provided separately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* to generate the IV/EIV data.
@@ -439,7 +440,7 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
- * to be provided seperately for the descriptor.
+ * to be provided separately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* to generate the IV/EIV data.
@@ -741,11 +742,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
- rt2x00_rf(&rt2x00dev->chip, RF5225))
+ if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
rt73usb_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2528) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
rt73usb_config_antenna_2x(rt2x00dev, ant);
}
@@ -779,8 +778,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1208,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
reg = 0x000023b0;
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
@@ -1665,7 +1662,7 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
- * decryption. It has provided the data seperately but rt2x00lib
+ * decryption. It has provided the data separately but rt2x00lib
* should decide if it should be reinserted.
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
@@ -1824,19 +1821,18 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF5226) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2528) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ if (!rt2x00_rf(rt2x00dev, RF5226) &&
+ !rt2x00_rf(rt2x00dev, RF2528) &&
+ !rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF2527)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
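For reference, the conversion in the hunk above: with MAC_CSR0_CHIPSET defined as FIELD32(0x000ffff0), the old raw-mask test (reg & 0x000ffff0) == 0x25730 is the same as checking that the extracted chipset field equals 0x25730 >> 4 = 0x2573, which is what rt2x00_rt(rt2x00dev, RT2573) expresses, assuming RT2573 expands to the chip id 0x2573 (that definition is not shown in this patch). A minimal sketch of the extraction, assuming rt2x00_get_field32() is a plain mask-and-shift as the FIELD32 mask implies:
/* Sketch only: how chipset and revision fall out of MAC_CSR0. */
u32 reg = 0x00025731;                   /* hypothetical MAC_CSR0 value */
u32 chipset = (reg & 0x000ffff0) >> 4;  /* 0x2573 -> RT2573 */
u32 rev = reg & 0x0000000f;             /* 1, so the rev == 0 bail-out is not taken */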
@@ -2081,17 +2077,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
+ if (rt2x00_rf(rt2x00dev, RF2528)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
spec->channels = rf_vals_bg_2528;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5226)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5226);
spec->channels = rf_vals_5226;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2527)) {
spec->num_channels = 14;
spec->channels = rf_vals_5225_2527;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5225)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
spec->channels = rf_vals_5225_2527;
@@ -2249,7 +2245,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt73usb_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2354,9 +2349,12 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
+ /* CEIVA */
+ { USB_DEVICE(0x178d, 0x02be), USB_DEVICE_DATA(&rt73usb_ops) },
/* CNet */
{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7942f810e928..7abe7eb14555 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -142,6 +142,8 @@ struct hw_pairwise_ta_entry {
* MAC_CSR0: ASIC revision number.
*/
#define MAC_CSR0 0x3000
+#define MAC_CSR0_REVISION FIELD32(0x0000000f)
+#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
/*
* MAC_CSR1: System control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a8185..de3844fe06d8 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
struct rtl818x_csr __iomem *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
/* rtl8180 driver specific */
spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index a1a3dd15c664..2131a442831a 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/eeprom_93cx6.h>
@@ -33,7 +34,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
-static struct pci_device_id rtl8180_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +83,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
};
-
-
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
struct rtl8180_priv *priv = dev->priv;
@@ -132,7 +131,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
/* TODO: improve signal/rssi reporting */
- rx_status.qual = flags2 & 0xFF;
rx_status.signal = (flags2 >> 8) & 0x7F;
/* XXX: is this correct? */
rx_status.rate_idx = (flags >> 20) & 0xF;
@@ -616,7 +614,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= RTL818X_CMD_TX_ENABLE;
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
- priv->mode = NL80211_IFTYPE_MONITOR;
return 0;
err_free_rings:
@@ -634,8 +631,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- priv->mode = NL80211_IFTYPE_UNSPECIFIED;
-
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -658,38 +653,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
}
static int rtl8180_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- if (priv->mode != NL80211_IFTYPE_MONITOR)
- return -EOPNOTSUPP;
+ /*
+ * We only support one active interface at a time.
+ */
+ if (priv->vif)
+ return -EBUSY;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
- le32_to_cpu(*(__le32 *)conf->mac_addr));
+ le32_to_cpu(*(__le32 *)vif->addr));
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
- le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ le16_to_cpu(*(__le16 *)(vif->addr + 4)));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
return 0;
}
static void rtl8180_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
}
@@ -766,6 +762,14 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
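One detail in the helper added above: the (u64) cast is applied to the high TSFT word before the << 32, so the shift is done in 64-bit arithmetic. A minimal sketch of why the cast matters in C:
/* Sketch: assembling a 64-bit TSF from two 32-bit register reads. */
u32 lo = 0x11111111, hi = 0x00000002;
u64 tsf = lo | (u64)hi << 32;           /* 0x0000000211111111 */
/* Without the cast, "hi << 32" shifts a 32-bit value by its full width,
 * which C leaves undefined, instead of producing the high word. */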
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -776,6 +780,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.bss_info_changed = rtl8180_bss_info_changed,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
+ .get_tsf = rtl8180_get_tsf,
};
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3a..6bb32112e65c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
struct rtl818x_csr *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
+
/* The mutex protects the TX loopback state.
* Any attempt to set channels concurrently locks the device.
*/
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..1d30792973f5 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/usb.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/eeprom_93cx6.h>
@@ -65,6 +66,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Sitecom */
{USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+ {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
/* Sphairon Access Systems GmbH */
{USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
/* Dick Smith Electronics */
@@ -1018,31 +1020,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
}
static int rtl8187_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
int i;
int ret = -EOPNOTSUPP;
mutex_lock(&priv->conf_mutex);
- if (priv->mode != NL80211_IFTYPE_MONITOR)
+ if (priv->vif)
goto exit;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
goto exit;
}
ret = 0;
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
- ((u8 *)conf->mac_addr)[i]);
+ ((u8 *)vif->addr)[i]);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
exit:
@@ -1051,11 +1052,10 @@ exit:
}
static void rtl8187_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
mutex_lock(&priv->conf_mutex);
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
mutex_unlock(&priv->conf_mutex);
}
@@ -1267,6 +1267,14 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
return 0;
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
.start = rtl8187_start,
@@ -1278,7 +1286,8 @@ static const struct ieee80211_ops rtl8187_ops = {
.prepare_multicast = rtl8187_prepare_multicast,
.configure_filter = rtl8187_configure_filter,
.conf_tx = rtl8187_conf_tx,
- .rfkill_poll = rtl8187_rfkill_poll
+ .rfkill_poll = rtl8187_rfkill_poll,
+ .get_tsf = rtl8187_get_tsf,
};
static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1365,7 +1374,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- priv->mode = NL80211_IFTYPE_MONITOR;
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb2..4637337d5ce6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -241,5 +241,5 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev)
cancel_delayed_work_sync(&priv->led_off);
cancel_delayed_work_sync(&priv->led_on);
}
-#endif /* def CONFIG_RTL8187_LED */
+#endif /* def CONFIG_RTL8187_LEDS */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h
index efe8041bdda4..d743c96d4a20 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h
@@ -54,6 +54,6 @@ struct rtl8187_led {
void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code);
void rtl8187_leds_exit(struct ieee80211_hw *dev);
-#endif /* def CONFIG_RTL8187_LED */
+#endif /* def CONFIG_RTL8187_LEDS */
#endif /* RTL8187_LED_H */
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 62e37ad01cc0..f47ec94c16dc 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -10,5 +10,7 @@ obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
wl1271_event.o wl1271_tx.o wl1271_rx.o \
wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o
+ wl1271_init.o wl1271_debugfs.o wl1271_io.o
+
+wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
obj-$(CONFIG_WL1271) += wl1271.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a124..37c61c19cae5 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
struct dentry *tx_queue_len;
+ struct dentry *tx_queue_status;
struct dentry *retry_count;
struct dentry *excessive_retries;
@@ -340,9 +341,6 @@ struct wl1251 {
/* Are we currently scanning */
bool scanning;
- /* Our association ID */
- u16 aid;
-
/* Default key (for WEP) */
u32 default_key;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc5..91891f928070 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -1,6 +1,7 @@
#include "wl1251_acx.h"
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/crc7.h>
#include "wl1251.h"
@@ -976,3 +977,72 @@ out:
kfree(acx);
return ret;
}
+
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop)
+{
+ struct wl1251_acx_ac_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_min %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cw_max;
+ acx->aifsn = aifs;
+ acx->txop_limit = txop;
+
+ ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx ac cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy)
+{
+ struct wl1251_acx_tid_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
+ "ps_scheme %d ack_policy %d", queue, type, tsid,
+ ps_scheme, ack_policy);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->queue = queue;
+ acx->type = type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+
+ ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx tid cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd8..26160c45784c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __attribute__ ((packed));
+struct wl1251_acx_ac_cfg {
+ struct acx_header header;
+
+ /*
+ * Access Category - The TX queue's access category
+ * (refer to AccessCategory_enum)
+ */
+ u8 ac;
+
+ /*
+ * The contention window minimum size (in slots) for
+ * the access class.
+ */
+ u8 cw_min;
+
+ /*
+ * The contention window maximum size (in slots) for
+ * the access class.
+ */
+ u16 cw_max;
+
+ /* The AIF value (in slots) for the access class. */
+ u8 aifsn;
+
+ u8 reserved;
+
+ /* The TX Op Limit (in microseconds) for the access class. */
+ u16 txop_limit;
+} __attribute__ ((packed));
+
+
+enum wl1251_acx_channel_type {
+ CHANNEL_TYPE_DCF = 0,
+ CHANNEL_TYPE_EDCF = 1,
+ CHANNEL_TYPE_HCCA = 2,
+};
+
+enum wl1251_acx_ps_scheme {
+ /* regular ps: simple sending of packets */
+ WL1251_ACX_PS_SCHEME_LEGACY = 0,
+
+ /* sending a packet triggers an unscheduled apsd downstream */
+ WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
+
+ /* a pspoll packet will be sent before every data packet */
+ WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
+
+ /* scheduled apsd mode */
+ WL1251_ACX_PS_SCHEME_SAPSD = 3,
+};
+
+enum wl1251_acx_ack_policy {
+ WL1251_ACX_ACK_POLICY_LEGACY = 0,
+ WL1251_ACX_ACK_POLICY_NO_ACK = 1,
+ WL1251_ACX_ACK_POLICY_BLOCK = 2,
+};
+
+struct wl1251_acx_tid_cfg {
+ struct acx_header header;
+
+ /* tx queue id number (0-7) */
+ u8 queue;
+
+ /* channel access type for the queue, enum wl1251_acx_channel_type */
+ u8 type;
+
+ /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
+ u8 tsid;
+
+ /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
+ u8 ps_scheme;
+
+ /* the tx queue ack policy, enum wl1251_acx_ack_policy */
+ u8 ack_policy;
+
+ u8 padding[3];
+
+ /* not supported */
+ u32 apsdconf[2];
+} __attribute__ ((packed));
+
/*************************************************************************
Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop);
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy);
#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2e733e7bdfd4..d5ac79aeaa73 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -22,6 +22,7 @@
*/
#include <linux/gpio.h>
+#include <linux/slab.h>
#include "wl1251_reg.h"
#include "wl1251_boot.h"
@@ -256,7 +257,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
}
}
- if (loop >= INIT_LOOP) {
+ if (loop > INIT_LOOP) {
wl1251_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726bd..a37b30cef489 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -1,6 +1,7 @@
#include "wl1251_cmd.h"
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/crc7.h>
#include "wl1251.h"
@@ -410,3 +411,86 @@ out:
kfree(cmd);
return ret;
}
+
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes)
+{
+ struct wl1251_cmd_scan *cmd;
+ int i, ret = 0;
+
+ wl1251_debug(DEBUG_CMD, "cmd scan");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
+ CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN);
+ cmd->params.scan_options = 0;
+ cmd->params.num_channels = n_channels;
+ cmd->params.num_probe_requests = n_probes;
+ cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ cmd->params.tid_trigger = 0;
+
+ for (i = 0; i < n_channels; i++) {
+ cmd->channels[i].min_duration =
+ cpu_to_le32(WL1251_SCAN_MIN_DURATION);
+ cmd->channels[i].max_duration =
+ cpu_to_le32(WL1251_SCAN_MAX_DURATION);
+ memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
+ memset(&cmd->channels[i].bssid_msb, 0xff, 2);
+ cmd->channels[i].early_termination = 0;
+ cmd->channels[i].tx_power_att = 0;
+ cmd->channels[i].channel = channels[i]->hw_value;
+ }
+
+ cmd->params.ssid_len = ssid_len;
+ if (ssid)
+ memcpy(cmd->params.ssid, ssid, ssid_len);
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd scan failed: %d", ret);
+ goto out;
+ }
+
+ wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+ if (cmd->header.status != CMD_STATUS_SUCCESS) {
+ wl1251_error("cmd scan status wasn't success: %d",
+ cmd->header.status);
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+{
+ struct wl1251_cmd_trigger_scan_to *cmd;
+ int ret;
+
+ wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->timeout = timeout;
+
+ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef5..4ad67cae94d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
#include "wl1251.h"
+#include <net/cfg80211.h>
+
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes);
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
/* unit ms */
#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_MIN_DURATION 30000
+#define WL1251_SCAN_MAX_DURATION 60000
+
+#define WL1251_SCAN_NUM_PROBES 3
-struct basic_scan_parameters {
+struct wl1251_scan_parameters {
u32 rx_config_options;
u32 rx_filter_options;
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
u8 tid_trigger;
u8 ssid_len;
- u32 ssid[8];
+ u8 ssid[32];
} __attribute__ ((packed));
-struct basic_scan_channel_parameters {
+struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
u32 max_duration; /* in TU */
u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
-struct cmd_scan {
+struct wl1251_cmd_scan {
struct wl1251_cmd_header header;
- struct basic_scan_parameters params;
- struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+ struct wl1251_scan_parameters params;
+ struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
} __attribute__ ((packed));
enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f83..5e4465ac08fa 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -24,6 +24,7 @@
#include "wl1251_debugfs.h"
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include "wl1251.h"
#include "wl1251_acx.h"
@@ -237,6 +238,27 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1251_open_file_generic,
};
+static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1251 *wl = file->private_data;
+ char buf[3], status;
+ int len;
+
+ if (wl->tx_queue_stopped)
+ status = 's';
+ else
+ status = 'r';
+
+ len = scnprintf(buf, sizeof(buf), "%c\n", status);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations tx_queue_status_ops = {
+ .read = tx_queue_status_read,
+ .open = wl1251_open_file_generic,
+};
+
static void wl1251_debugfs_delete_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +353,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_DEL(tx_queue_len);
+ DEBUGFS_DEL(tx_queue_status);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
}
@@ -431,6 +454,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
+ DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
@@ -443,7 +467,8 @@ out:
void wl1251_debugfs_reset(struct wl1251 *wl)
{
- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ if (wl->stats.fw_stats != NULL)
+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383eeb..b538bdd7b320 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "wl1251_init.h"
#include "wl12xx_80211.h"
@@ -294,6 +295,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
goto out;
}
+ wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
+ wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
+ wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
+ wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
+
out:
kfree(config);
return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885ea..269cefb3e7d4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
#include "wl1251.h"
+enum {
+ /* best effort/legacy */
+ AC_BE = 0,
+
+ /* background */
+ AC_BK = 1,
+
+ /* video */
+ AC_VI = 2,
+
+ /* voice */
+ AC_VO = 3,
+
+ /* broadcast dummy access category */
+ AC_BCAST = 4,
+
+ NUM_ACCESS_CATEGORIES = 4
+};
+
+/* following are default values for the IE fields */
+#define CWMIN_BK 15
+#define CWMIN_BE 15
+#define CWMIN_VI 7
+#define CWMIN_VO 3
+#define CWMAX_BK 1023
+#define CWMAX_BE 63
+#define CWMAX_VI 15
+#define CWMAX_VO 7
+
+/* slot number setting to start transmission at PIFS interval */
+#define AIFS_PIFS 1
+
+/*
+ * slot number setting to start transmission at DIFS interval - normal DCF
+ * access
+ */
+#define AIFS_DIFS 2
+
+#define AIFSN_BK 7
+#define AIFSN_BE 3
+#define AIFSN_VI AIFS_PIFS
+#define AIFSN_VO AIFS_PIFS
+#define TXOP_BK 0
+#define TXOP_BE 0
+#define TXOP_VI 3008
+#define TXOP_VO 1504
+
int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
int wl1251_hw_init_templates_config(struct wl1251 *wl);
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa5..1c8226eee409 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include "wl1251.h"
#include "wl12xx_80211.h"
@@ -395,6 +396,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
+ wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
ieee80211_stop_queues(wl->hw);
/*
@@ -510,13 +512,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
}
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
int ret = 0;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -524,9 +526,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -538,8 +540,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) {
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
if (ret < 0)
@@ -552,7 +554,7 @@ out:
}
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
@@ -562,43 +564,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static int wl1251_build_null_data(struct wl1251 *wl)
+static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
- struct wl12xx_null_data_template template;
+ struct ieee80211_qos_hdr template;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
-
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
-
- return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
- sizeof(template));
-
-}
-
-static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
-{
- struct wl12xx_ps_poll_template template;
+ memset(&template, 0, sizeof(template));
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
- return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
+ return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
-
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -634,26 +618,34 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->psm_requested = true;
+ wl->dtim_period = conf->ps_dtim_period;
+
+ ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+ wl->dtim_period);
+
/*
- * We enter PSM only if we're already associated.
- * If we're not, we'll enter it when joining an SSID,
- * through the bss_info_changed() hook.
+ * mac80211 enables PSM only if we're already associated.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
- if (wl->psm)
+ if (wl->psm) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
if (conf->power_level != wl->power_level) {
ret = wl1251_acx_tx_power(wl, conf->power_level);
if (ret < 0)
- goto out;
+ goto out_sleep;
wl->power_level = conf->power_level;
}
@@ -864,199 +856,61 @@ out:
return ret;
}
-static int wl1251_build_basic_rates(char *rates)
-{
- u8 index = 0;
-
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- return index;
-}
-
-static int wl1251_build_extended_rates(char *rates)
+static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
{
- u8 index = 0;
-
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
-
- return index;
-}
-
+ struct wl1251 *wl = hw->priv;
+ struct sk_buff *skb;
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+ int ret;
-static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len)
-{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
-
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1251_build_basic_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1251_build_extended_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
-
- return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
- size);
-}
+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 num_channels,
- u8 probe_requests)
-{
- struct wl1251_cmd_trigger_scan_to *trigger = NULL;
- struct cmd_scan *params = NULL;
- int i, ret;
- u16 scan_options = 0;
-
- if (wl->scanning)
- return -EINVAL;
-
- params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- params->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
- /* High priority scan */
- if (!active_scan)
- scan_options |= SCAN_PASSIVE;
- if (high_prio)
- scan_options |= SCAN_PRIORITY_HIGH;
- params->params.scan_options = scan_options;
-
- params->params.num_channels = num_channels;
- params->params.num_probe_requests = probe_requests;
- params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
- params->params.tid_trigger = 0;
-
- for (i = 0; i < num_channels; i++) {
- params->channels[i].min_duration = cpu_to_le32(30000);
- params->channels[i].max_duration = cpu_to_le32(60000);
- memset(&params->channels[i].bssid_lsb, 0xff, 4);
- memset(&params->channels[i].bssid_msb, 0xff, 2);
- params->channels[i].early_termination = 0;
- params->channels[i].tx_power_att = 0;
- params->channels[i].channel = i + 1;
- memset(params->channels[i].pad, 0, 3);
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
}
- for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
- memset(&params->channels[i], 0,
- sizeof(struct basic_scan_channel_parameters));
-
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
- } else {
- params->params.ssid_len = 0;
- memset(params->params.ssid, 0, 32);
- }
+ mutex_lock(&wl->mutex);
- ret = wl1251_build_probe_req(wl, ssid, len);
- if (ret < 0) {
- wl1251_error("PROBE request template failed");
+ if (wl->scanning) {
+ wl1251_debug(DEBUG_SCAN, "scan already in progress");
+ ret = -EINVAL;
goto out;
}
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!trigger)
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
goto out;
- trigger->timeout = 0;
-
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger));
- if (ret < 0) {
- wl1251_error("trigger scan to failed for hw scan");
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ req->ie, req->ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
}
- wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
-
- wl->scanning = true;
+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- wl1251_error("SCAN failed");
+ goto out_sleep;
- wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+ wl->scanning = true;
- if (params->header.status != CMD_STATUS_SUCCESS) {
- wl1251_error("TEST command answer error: %d",
- params->header.status);
+ ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
+ req->n_channels, WL1251_SCAN_NUM_PROBES);
+ if (ret < 0) {
wl->scanning = false;
- ret = -EIO;
- goto out;
- }
-
-out:
- kfree(params);
- return ret;
-
-}
-
-static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
-{
- struct wl1251 *wl = hw->priv;
- int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ goto out_sleep;
}
- mutex_lock(&wl->mutex);
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
-
+out_sleep:
wl1251_ps_elp_sleep(wl);
out:
@@ -1093,9 +947,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1251_cmd_ps_mode mode;
struct wl1251 *wl = hw->priv;
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *skb;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +962,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- ret = wl1251_build_null_data(wl);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
+ skb->data, skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out;
@@ -1124,27 +987,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
- wl->dtim_period = bss_conf->dtim_period;
- ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
- wl->dtim_period);
- wl->aid = bss_conf->aid;
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
- ret = wl1251_build_ps_poll(wl, wl->aid);
+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
+ skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, wl->aid);
+ ret = wl1251_acx_aid(wl, bss_conf->aid);
if (ret < 0)
goto out_sleep;
-
- /* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1251_ps_set_mode(wl, mode);
- if (ret < 0)
- goto out_sleep;
- }
} else {
/* use defaults when not associated */
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
@@ -1176,7 +1033,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
- goto out;
+ goto out_sleep;
}
}
@@ -1187,7 +1044,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0) {
dev_kfree_skb(beacon);
- goto out;
+ goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1053,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(beacon);
if (ret < 0)
- goto out;
+ goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
wl->channel, wl->dtim_period);
if (ret < 0)
- goto out;
+ goto out_sleep;
}
out_sleep:
@@ -1273,6 +1130,49 @@ static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 13, .center_freq = 2472},
};
+static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ enum wl1251_acx_ps_scheme ps_scheme;
+ struct wl1251 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* mac80211 uses units of 32 usec */
+ ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop * 32);
+ if (ret < 0)
+ goto out_sleep;
+
+ if (params->uapsd)
+ ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
+
+ ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
+ CHANNEL_TYPE_EDCF,
+ wl1251_tx_get_queue(queue), ps_scheme,
+ WL1251_ACX_ACK_POLICY_LEGACY);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
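The '* 32' above converts the txop value mac80211 passes in (in units of 32 us, per the comment) to the microsecond TXOP field described in wl1251_acx.h. As a worked check against the defaults in wl1251_init.h: 94 * 32 = 3008 us (TXOP_VI) and 47 * 32 = 1504 us (TXOP_VO). A trivial sketch of the conversion:
/* Illustration only: mac80211 txop units (32 us each) to microseconds. */
static inline u32 txop_to_usec(u16 txop)
{
        return (u32)txop * 32;  /* 94 -> 3008, 47 -> 1504 */
}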
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1251_band_2ghz = {
.channels = wl1251_channels,
@@ -1293,6 +1193,7 @@ static const struct ieee80211_ops wl1251_ops = {
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
.set_rts_threshold = wl1251_op_set_rts_threshold,
+ .conf_tx = wl1251_op_conf_tx,
};
static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1233,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER;
+ IEEE80211_HW_BEACON_FILTER |
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->queues = 4;
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff77..851dfb65e474 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
#include "wl1251_cmd.h"
#include "wl1251_io.h"
-#define WL1251_WAKEUP_TIMEOUT 2000
+/* in ms */
+#define WL1251_WAKEUP_TIMEOUT 100
void wl1251_elp_work(struct work_struct *work)
{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
- unsigned long timeout;
+ unsigned long timeout, start;
u32 elp_reg;
if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
+ start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies) -
- (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
+ jiffies_to_msecs(jiffies - start));
wl->elp = false;
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbffc..6f229e0990f4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -23,6 +23,7 @@
*/
#include <linux/skbuff.h>
+#include <linux/gfp.h>
#include <net/mac80211.h>
#include "wl1251.h"
@@ -126,7 +127,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
- skb = dev_alloc_skb(length);
+ skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1251_error("Couldn't allocate RX frame");
return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 9cc8c323830f..3bfb59bd4635 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -23,6 +23,7 @@
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/spi/wl12xx.h>
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f85970615849..c8223185efd2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
tx_hdr->expiry_time = cpu_to_le32(1 << 16);
tx_hdr->id = id;
- /* FIXME: how to get the correct queue id? */
- tx_hdr->xmit_queue = 0;
+ tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
wl1251_tx_control(tx_hdr, control, fc);
wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
/* align the buffer on a 4-byte boundary */
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
+ tx_hdr = (struct tx_double_buffer_desc *) skb->data;
} else {
wl1251_info("No handler, fixme!");
return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_mem_write(wl, addr, skb->data, len);
- wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
- tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);
+ wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
+ "queue %d", tx_hdr->id, skb, tx_hdr->length,
+ tx_hdr->rate, tx_hdr->xmit_queue);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c810..55856c6bb97a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
#define __WL1251_TX_H__
#include <linux/bitops.h>
+#include "wl1251_acx.h"
/*
*
@@ -209,6 +210,22 @@ struct tx_result {
u8 done_2;
} __attribute__ ((packed));
+static inline int wl1251_tx_get_queue(int queue)
+{
+ switch (queue) {
+ case 0:
+ return QOS_AC_VO;
+ case 1:
+ return QOS_AC_VI;
+ case 2:
+ return QOS_AC_BE;
+ case 3:
+ return QOS_AC_BK;
+ default:
+ return QOS_AC_BE;
+ }
+}
+
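An equivalent, table-driven way to read the mapping above (sketch only; the QOS_AC_* constants are presumed to come from wl1251_acx.h, which this header now includes). mac80211 numbers its TX queues from highest to lowest priority, so queue 0 is voice:
/* Sketch: same mapping as wl1251_tx_get_queue(), as a lookup table. */
static const int wl1251_queue_to_ac[] = {
        QOS_AC_VO,      /* mac80211 queue 0 */
        QOS_AC_VI,      /* mac80211 queue 1 */
        QOS_AC_BE,      /* mac80211 queue 2 */
        QOS_AC_BK,      /* mac80211 queue 3 */
};
/* Anything out of range falls back to best effort, as in the switch. */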
void wl1251_tx_work(struct work_struct *work);
void wl1251_tx_complete(struct wl1251 *wl);
void wl1251_tx_flush(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861f..97ea5096bc8c 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -43,7 +43,7 @@ enum {
DEBUG_SPI = BIT(1),
DEBUG_BOOT = BIT(2),
DEBUG_MAILBOX = BIT(3),
- DEBUG_NETLINK = BIT(4),
+ DEBUG_TESTMODE = BIT(4),
DEBUG_EVENT = BIT(5),
DEBUG_TX = BIT(6),
DEBUG_RX = BIT(7),
@@ -107,11 +107,36 @@ enum {
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
-
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+/* NVS data structure */
+#define WL1271_NVS_SECTION_SIZE 468
+
+#define WL1271_NVS_GENERAL_PARAMS_SIZE 57
+#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
+#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17
+#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
+#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65
+#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
+#define WL1271_NVS_FEM_COUNT 2
+#define WL1271_NVS_INI_SPARE_SIZE 124
+
+struct wl1271_nvs_file {
+ /* NVS section */
+ u8 nvs[WL1271_NVS_SECTION_SIZE];
+
+ /* INI section */
+ u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
+ u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
+ u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
+ [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
+ u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
+} __attribute__ ((packed));
+
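With the sizes above, the packed layout adds up to 468 + 58 + 18 + 2*66 + 124 = 800 bytes. A compile-time check along these lines could catch an accidental layout change (sketch only, not part of this patch; BUILD_BUG_ON is the usual kernel compile-time assert):
/* Sketch: compile-time check of the packed NVS file layout (800 bytes). */
static inline void wl1271_nvs_layout_check(void)
{
        BUILD_BUG_ON(sizeof(struct wl1271_nvs_file) != 800);
}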
/*
* Enable/disable 802.11a support for WL1273
*/
@@ -276,6 +301,7 @@ struct wl1271_debugfs {
struct dentry *retry_count;
struct dentry *excessive_retries;
+ struct dentry *gpio_power;
};
#define NUM_TX_QUEUES 4
@@ -322,6 +348,17 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
+#define WL1271_FLAG_STA_RATES_CHANGED (0)
+#define WL1271_FLAG_STA_ASSOCIATED (1)
+#define WL1271_FLAG_JOINED (2)
+#define WL1271_FLAG_GPIO_POWER (3)
+#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
+#define WL1271_FLAG_SCANNING (5)
+#define WL1271_FLAG_IN_ELP (6)
+#define WL1271_FLAG_PSM (7)
+#define WL1271_FLAG_PSM_REQUESTED (8)
+ unsigned long flags;
+
struct wl1271_partition_set part;
struct wl1271_chip chip;
@@ -331,8 +368,7 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
- u8 *nvs;
- size_t nvs_len;
+ struct wl1271_nvs_file *nvs;
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
@@ -359,7 +395,6 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue;
- bool tx_queue_stopped;
struct work_struct tx_work;
@@ -387,14 +422,15 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
- bool scanning;
struct wl1271_scan scan;
/* Our association ID */
u16 aid;
/* currently configured rate set */
+ u32 sta_rate_set;
u32 basic_rate_set;
+ u32 rate_set;
/* The current band */
enum ieee80211_band band;
@@ -405,18 +441,9 @@ struct wl1271 {
unsigned int rx_config;
unsigned int rx_filter;
- /* is firmware in elp mode */
- bool elp;
-
struct completion *elp_compl;
struct delayed_work elp_work;
- /* we can be in psm, but not in elp, we have to differentiate */
- bool psm;
-
- /* PSM mode requested */
- bool psm_requested;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -435,9 +462,6 @@ struct wl1271 {
struct ieee80211_vif *vif;
- /* Used for a workaround to send disconnect before rejoining */
- bool joined;
-
/* Current chipset configuration */
struct conf_drv_settings conf;
@@ -455,11 +479,14 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_MAX_LENGTH 20
-/* WL1271 needs a 200ms sleep after power on */
+/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
+ on in case it has been shut down shortly before */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
static inline bool wl1271_11a_enabled(void)
{
+ /* FIXME: this could be determined based on the NVS-INI file */
#ifdef WL1271_80211A_ENABLED
return true;
#else
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7a..308782421fce 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
@@ -390,6 +391,35 @@ out:
return ret;
}
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
+{
+ struct acx_dco_itrim_params *dco;
+ struct conf_itrim_settings *c = &wl->conf.itrim;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
+
+ dco = kzalloc(sizeof(*dco), GFP_KERNEL);
+ if (!dco) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dco->enable = c->enable;
+ dco->timeout = cpu_to_le32(c->timeout);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
+ dco, sizeof(*dco));
+ if (ret < 0) {
+ wl1271_warning("failed to set dco itrim parameters: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(dco);
+ return ret;
+}
+
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +788,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
+int wl1271_acx_rate_policies(struct wl1271 *wl)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ int idx = 0;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +804,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
goto out;
}
- /* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = cpu_to_le32(1);
- acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
- acx->rate_class[0].short_retry_limit = c->short_retry_limit;
- acx->rate_class[0].long_retry_limit = c->long_retry_limit;
- acx->rate_class[0].aflags = c->aflags;
+ /* configure one basic rate class */
+ idx = ACX_TX_BASIC_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ /* configure one AP supported rate class */
+ idx = ACX_TX_AP_FULL_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
@@ -791,12 +831,14 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
- int i, ret = 0;
+ int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx access category config");
+ wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_min %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
@@ -805,21 +847,16 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
goto out;
}
- for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
- struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
- acx->ac = c->ac;
- acx->cw_min = c->cw_min;
- acx->cw_max = cpu_to_le16(c->cw_max);
- acx->aifsn = c->aifsn;
- acx->reserved = 0;
- acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cpu_to_le16(cw_max);
+ acx->aifsn = aifsn;
+ acx->tx_op_limit = cpu_to_le16(txop);
- ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
- if (ret < 0) {
- wl1271_warning("Setting of access category "
- "config: %d", ret);
- goto out;
- }
+ ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ac cfg failed: %d", ret);
+ goto out;
}
out:
@@ -827,10 +864,12 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl)
+int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+ u8 tsid, u8 ps_scheme, u8 ack_policy,
+ u32 apsd_conf0, u32 apsd_conf1)
{
struct acx_tid_config *acx;
- int i, ret = 0;
+ int ret = 0;
wl1271_debug(DEBUG_ACX, "acx tid config");
@@ -841,21 +880,18 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
goto out;
}
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
- acx->queue_id = c->queue_id;
- acx->channel_type = c->channel_type;
- acx->tsid = c->tsid;
- acx->ps_scheme = c->ps_scheme;
- acx->ack_policy = c->ack_policy;
- acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
- acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
+ acx->queue_id = queue_id;
+ acx->channel_type = channel_type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+ acx->apsd_conf[0] = cpu_to_le32(apsd_conf0);
+ acx->apsd_conf[1] = cpu_to_le32(apsd_conf1);
- ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
- if (ret < 0) {
- wl1271_warning("Setting of tid config failed: %d", ret);
- goto out;
- }
+ ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of tid config failed: %d", ret);
+ goto out;
}
out:
@@ -1012,59 +1048,6 @@ out:
return ret;
}
-int wl1271_acx_smart_reflex(struct wl1271 *wl)
-{
- struct acx_smart_reflex_state *sr_state = NULL;
- struct acx_smart_reflex_config_params *sr_param = NULL;
- int i, ret;
-
- wl1271_debug(DEBUG_ACX, "acx smart reflex");
-
- sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
- if (!sr_param) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
- struct conf_mart_reflex_err_table *e =
- &(wl->conf.init.sr_err_tbl[i]);
-
- sr_param->error_table[i].len = e->len;
- sr_param->error_table[i].upper_limit = e->upper_limit;
- memcpy(sr_param->error_table[i].values, e->values, e->len);
- }
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
- sr_param, sizeof(*sr_param));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
- sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
- if (!sr_state) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* enable smart reflex */
- sr_state->enable = wl->conf.init.sr_enable;
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
- sr_state, sizeof(*sr_state));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
-out:
- kfree(sr_state);
- kfree(sr_param);
- return ret;
-
-}
-
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1115,31 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_pm_config(struct wl1271 *wl)
+{
+ struct wl1271_acx_pm_config *acx = NULL;
+ struct conf_pm_config_settings *c = &wl->conf.pm_config;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx pm config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
+ acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
+
+ ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx pm config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
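wl1271_acx_rate_policies() no longer takes a rate mask: both policies are built from fields of struct wl1271. A minimal sketch of a caller refreshing the policies when the AP-supported rate set changes; the function name is hypothetical, while the fields and the ACX call are the ones introduced by this patch:

/* Hypothetical caller, for illustration only. */
static int example_refresh_rate_policies(struct wl1271 *wl, u32 ap_rates)
{
	/* the ACX_TX_AP_FULL_RATE class follows the AP-supported rates */
	wl->rate_set = ap_rates;
	/* the ACX_TX_BASIC_RATE class keeps using wl->basic_rate_set */
	return wl1271_acx_rate_policies(wl);
}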
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a8128542..aeccc98581eb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -2,7 +2,7 @@
* This file is part of wl1271
*
* Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -348,7 +348,7 @@ struct acx_beacon_filter_option {
* ACXBeaconFilterEntry (not 221)
* Byte Offset Size (Bytes) Definition
* =========== ============ ==========
- * 0 1 IE identifier
+ * 0 1 IE identifier
* 1 1 Treatment bit mask
*
* ACXBeaconFilterEntry (221)
@@ -381,8 +381,8 @@ struct acx_beacon_filter_ie_table {
struct acx_header header;
u8 num_ie;
- u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
u8 pad[3];
+ u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
} __attribute__ ((packed));
struct acx_conn_monit_params {
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
u8 pad[3];
} __attribute__ ((packed));
-struct acx_smart_reflex_state {
+struct acx_dco_itrim_params {
struct acx_header header;
u8 enable;
u8 padding[3];
-} __attribute__ ((packed));
-
-struct smart_reflex_err_table {
- u8 len;
- s8 upper_limit;
- s8 values[14];
-} __attribute__ ((packed));
-
-struct acx_smart_reflex_config_params {
- struct acx_header header;
-
- struct smart_reflex_err_table error_table[3];
+ __le32 timeout;
} __attribute__ ((packed));
#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
u8 reserved;
};
+#define ACX_TX_BASIC_RATE 0
+#define ACX_TX_AP_FULL_RATE 1
+#define ACX_TX_RATE_POLICY_CNT 2
struct acx_rate_policy {
struct acx_header header;
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __attribute__ ((packed));
-#define ACX_RX_MEM_BLOCKS 64
-#define ACX_TX_MIN_MEM_BLOCKS 64
+#define ACX_RX_MEM_BLOCKS 70
+#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
#define ACX_NUM_SSID_PROFILES 1
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
used. */
} __attribute__((packed));
+struct wl1271_acx_pm_config {
+ struct acx_header header;
+
+ __le32 host_clk_settling_time;
+ u8 host_fast_wakeup_support;
+ u8 padding[3];
+} __attribute__ ((packed));
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
- ACX_SET_SMART_REFLEX_STATE = 0x005B,
- ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
+ ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
+ ACX_PM_CONFIG = 0x1016,
MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
void *mc_list, u32 mc_list_len);
int wl1271_acx_service_period_timeout(struct wl1271 *wl);
int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,9 +1069,12 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
-int wl1271_acx_ac_cfg(struct wl1271 *wl);
-int wl1271_acx_tid_cfg(struct wl1271 *wl);
+int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+ u8 tsid, u8 ps_scheme, u8 ack_policy,
+ u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
int wl1271_acx_mem_cfg(struct wl1271 *wl);
@@ -1081,5 +1084,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
+int wl1271_acx_pm_config(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca3..024356263065 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -22,11 +22,13 @@
*/
#include <linux/gpio.h>
+#include <linux/slab.h>
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_boot.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
@@ -93,19 +95,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
u32 cpu_ctrl;
/* 10.5.0 run the firmware (I) */
- cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
+ cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
/* 10.5.1 run the firmware (II) */
cpu_ctrl |= flag;
- wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
+ wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
- wl1271_spi_read(wl, wl->cmd_box_addr,
- &static_data, sizeof(static_data), false);
+ wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
+ false);
strncpy(wl->chip.fw_ver, static_data.fw_version,
sizeof(wl->chip.fw_ver));
@@ -164,7 +166,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
- wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
+ wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
chunk_num++;
}
@@ -175,7 +177,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
fw_data_len % CHUNK_SIZE, p, addr);
- wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+ wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
kfree(chunk);
return 0;
@@ -219,23 +221,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
- u8 *nvs_ptr, *nvs, *nvs_aligned;
+ u8 *nvs_ptr, *nvs_aligned;
- nvs = wl->nvs;
- if (nvs == NULL)
+ if (wl->nvs == NULL)
return -ENODEV;
- nvs_ptr = nvs;
-
- nvs_len = wl->nvs_len;
-
- /* Update the device MAC address into the nvs */
- nvs[11] = wl->mac_addr[0];
- nvs[10] = wl->mac_addr[1];
- nvs[6] = wl->mac_addr[2];
- nvs[5] = wl->mac_addr[3];
- nvs[4] = wl->mac_addr[4];
- nvs[3] = wl->mac_addr[5];
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = sizeof(wl->nvs->nvs);
+ nvs_ptr = (u8 *)wl->nvs->nvs;
/*
* Layout before the actual NVS tables:
@@ -265,7 +258,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
- wl1271_spi_write32(wl, dest_addr, val);
+ wl1271_write32(wl, dest_addr, val);
nvs_ptr += 4;
dest_addr += 4;
@@ -277,7 +270,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
* is 7 bytes further.
*/
nvs_ptr += 7;
- nvs_len -= nvs_ptr - nvs;
+ nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs;
nvs_len = ALIGN(nvs_len, 4);
/* FIXME: The driver sets the partition here, but this is not needed,
@@ -286,15 +279,20 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
wl1271_set_partition(wl, &part_table[PART_WORK]);
/* Copy the NVS tables to a new block to ensure alignment */
- nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
- if (!nvs_aligned)
- return -ENOMEM;
+ /* FIXME: We jump 3 more bytes before uploading the NVS. It seems
+ that our NVS files have three extra zeros here. I'm not sure whether
+ the problem is in our NVS generation or whether we really should jump
+ these 3 bytes here */
+ nvs_ptr += 3;
+
+ nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
+ if (!nvs_aligned)
+ return -ENOMEM;
/* And finally we upload the NVS tables */
/* FIXME: In wl1271, we upload everything at once.
No endianness handling needed here?! The ref driver doesn't do
anything about it at this point */
- wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
+ wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
kfree(nvs_aligned);
return 0;
@@ -303,9 +301,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
{
enable_irq(wl->irq);
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
- wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
}
static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -314,13 +312,12 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
u32 boot_data;
/* perform soft reset */
- wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
- ACX_SLV_SOFT_RESET_BIT);
+ wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
- boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
+ boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
@@ -336,10 +333,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
}
/* disable Rx/Tx */
- wl1271_spi_write32(wl, ENABLE, 0x0);
+ wl1271_write32(wl, ENABLE, 0x0);
/* disable auto calibration on start*/
- wl1271_spi_write32(wl, SPARE_A2, 0xffff);
+ wl1271_write32(wl, SPARE_A2, 0xffff);
return 0;
}
@@ -351,7 +348,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
- chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
+ chip_id = wl1271_read32(wl, CHIP_ID_B);
wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
@@ -364,8 +361,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- interrupt = wl1271_spi_read32(wl,
- ACX_REG_INTERRUPT_NO_CLEAR);
+ interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
if (interrupt == 0xffffffff) {
wl1271_error("error reading hardware complete "
@@ -374,8 +370,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_INIT_COMPLETE);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_INIT_COMPLETE);
break;
}
}
@@ -387,10 +383,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
}
/* get hardware config command mail box */
- wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
+ wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
/* get hardware config event mail box */
- wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
+ wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
/* set the working partition to its "running" mode offset */
wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -463,9 +459,9 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
}
- wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
+ wl1271_write32(wl, PLL_PARAMETERS, clk);
- pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
+ pause = wl1271_read32(wl, PLL_PARAMETERS);
wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
@@ -474,10 +470,10 @@ int wl1271_boot(struct wl1271 *wl)
* 0x3ff (magic number ). How does
* this work?! */
pause |= WU_COUNTER_PAUSE_VAL;
- wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
+ wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
/* Continue the ELP wake up sequence */
- wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
udelay(500);
wl1271_set_partition(wl, &part_table[PART_DRPW]);
@@ -487,18 +483,18 @@ int wl1271_boot(struct wl1271 *wl)
before taking DRPw out of reset */
wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
- clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
+ clk = wl1271_read32(wl, DRPW_SCRATCH_START);
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
/* 2 */
clk |= (REF_CLOCK << 1) << 4;
- wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
+ wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
/* Disable interrupts */
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
ret = wl1271_boot_soft_reset(wl);
if (ret < 0)
@@ -513,23 +509,22 @@ int wl1271_boot(struct wl1271 *wl)
* ACX_EEPROMLESS_IND_REG */
wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
- wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
- ACX_EEPROMLESS_IND_REG);
+ wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
- tmp = wl1271_spi_read32(wl, CHIP_ID_B);
+ tmp = wl1271_read32(wl, CHIP_ID_B);
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
/* 6. read the EEPROM parameters */
- tmp = wl1271_spi_read32(wl, SCR_PAD2);
+ tmp = wl1271_read32(wl, SCR_PAD2);
ret = wl1271_boot_write_irq_polarity(wl);
if (ret < 0)
goto out;
/* FIXME: Need to check whether this is really what we want */
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_ALL_EVENTS_VECTOR);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_ALL_EVENTS_VECTOR);
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
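The NVS upload above now relies on the fixed layout of struct wl1271_nvs_file instead of a caller-supplied length. A hedged sketch of how a loader could size-check the blob before assigning wl->nvs; the firmware name and the use of request_firmware()/kmemdup() here are assumptions, not taken from this patch:

#include <linux/firmware.h>
#include <linux/slab.h>

/* Illustrative only; firmware name and error codes are assumed. */
static int example_fetch_nvs(struct wl1271 *wl, struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "wl1271-nvs.bin", dev);
	if (ret < 0)
		return ret;

	if (fw->size != sizeof(struct wl1271_nvs_file)) {
		ret = -EILSEQ;
		goto out;
	}

	wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
	if (!wl->nvs)
		ret = -ENOMEM;
out:
	release_firmware(fw);
	return ret;
}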
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 886a9bc39cc1..e7832f3318eb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -26,10 +26,12 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include "wl1271.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_acx.h"
#include "wl12xx_80211.h"
#include "wl1271_cmd.h"
@@ -57,13 +59,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
WARN_ON(len % 4 != 0);
- wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
+ wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
- intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
@@ -73,13 +75,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
msleep(1);
- intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
/* read back the status code of the command */
if (res_len == 0)
res_len = sizeof(struct wl1271_cmd_header);
- wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+ wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);
status = le16_to_cpu(cmd->status);
if (status != CMD_STATUS_SUCCESS) {
@@ -87,8 +89,8 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = -EIO;
}
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_CMD_COMPLETE);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_CMD_COMPLETE);
out:
return ret;
@@ -191,23 +193,19 @@ static int wl1271_cmd_cal(struct wl1271 *wl)
int wl1271_cmd_general_parms(struct wl1271 *wl)
{
struct wl1271_general_parms_cmd *gen_parms;
- struct conf_general_parms *g = &wl->conf.init.genparam;
int ret;
+ if (!wl->nvs)
+ return -ENODEV;
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
- gen_parms->ref_clk = g->ref_clk;
- gen_parms->settling_time = g->settling_time;
- gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
- gen_parms->dc2dcmode = g->dc2dcmode;
- gen_parms->single_dual_band = g->single_dual_band;
- gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
- gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
- gen_parms->settings = g->settings;
+ memcpy(gen_parms->params, wl->nvs->general_params,
+ WL1271_NVS_GENERAL_PARAMS_SIZE);
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
if (ret < 0)
@@ -220,8 +218,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
int wl1271_cmd_radio_parms(struct wl1271 *wl)
{
struct wl1271_radio_parms_cmd *radio_parms;
- struct conf_radio_parms *r = &wl->conf.init.radioparam;
- int i, ret;
+ struct conf_radio_parms *rparam = &wl->conf.init.radioparam;
+ int ret;
+
+ if (!wl->nvs)
+ return -ENODEV;
radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
if (!radio_parms)
@@ -229,60 +230,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
- /* Static radio parameters */
- radio_parms->rx_trace_loss = r->rx_trace_loss;
- radio_parms->tx_trace_loss = r->tx_trace_loss;
- memcpy(radio_parms->rx_rssi_and_proc_compens,
- r->rx_rssi_and_proc_compens,
- CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
-
- memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->rx_rssi_and_proc_compens_5,
- r->rx_rssi_and_proc_compens_5,
- CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
-
- /* Dynamic radio parameters */
- radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
- radio_parms->tx_ref_power = r->tx_ref_power;
- radio_parms->tx_offset_db = r->tx_offset_db;
-
- memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
- CONF_NUMBER_OF_RATE_GROUPS);
-
- memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
- CONF_NUMBER_OF_CHANNELS_2_4);
- memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
- CONF_NUMBER_OF_CHANNELS_2_4);
- memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
-
- radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
-
- for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
- radio_parms->tx_ref_pd_voltage_5[i] =
- cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
- memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_rate_limits_normal_5,
- r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_rate_limits_degraded_5,
- r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_channel_limits_ofdm_5,
- r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
- memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->rx_fem_insertion_loss_5,
- r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
+ memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params,
+ WL1271_NVS_STAT_RADIO_PARAMS_SIZE);
+ memcpy(radio_parms->dyn_radio_params,
+ wl->nvs->dyn_radio_params[rparam->fem],
+ WL1271_NVS_DYN_RADIO_PARAMS_SIZE);
+
+ /* FIXME: current NVS is missing 5GHz parameters */
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
@@ -311,19 +265,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
do_cal = false;
}
- /* FIXME: This is a workaround, because with the current stack, we
- * cannot know when we have disassociated. So, if we have already
- * joined, we disconnect before joining again. */
- if (wl->joined) {
- ret = wl1271_cmd_disconnect(wl);
- if (ret < 0) {
- wl1271_error("failed to disconnect before rejoining");
- goto out;
- }
-
- wl->joined = false;
- }
-
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join) {
ret = -ENOMEM;
@@ -388,8 +329,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- wl->joined = true;
-
/*
* ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
* simplify locking we just sleep instead, for now
@@ -487,7 +426,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
return 0;
}
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
@@ -501,7 +440,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
goto out;
}
- cmd->channel = channel;
+ /* the channel here is only used for calibration, so it is hardcoded to 1 */
+ cmd->channel = 1;
if (enable) {
cmd_rx = CMD_ENABLE_RX;
@@ -514,29 +454,29 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("rx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
return ret;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
out:
kfree(cmd);
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -557,7 +497,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
}
ps_params->ps_mode = ps_mode;
- ps_params->send_null_data = 1;
+ ps_params->send_null_data = send;
ps_params->retries = 5;
ps_params->hang_over_period = 128;
ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
@@ -636,7 +576,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
channels = wl->hw->wiphy->bands[ieee_band]->channels;
n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
- if (wl->scanning)
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +651,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
- wl->scanning = true;
+ set_bit(WL1271_FLAG_SCANNING, &wl->flags);
if (wl1271_11a_enabled()) {
wl->scan.state = band;
if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +669,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
goto out;
}
@@ -777,7 +717,7 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(char *rates, u8 band)
+static int wl1271_build_basic_rates(u8 *rates, u8 band)
{
u8 index = 0;
@@ -804,7 +744,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
return index;
}
-static int wl1271_build_extended_rates(char *rates, u8 band)
+static int wl1271_build_extended_rates(u8 *rates, u8 band)
{
u8 index = 0;
@@ -1003,7 +943,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set keys");
- goto out;
+ goto out;
}
out:
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb9229..2dc06c73532b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,8 +37,8 @@ int wl1271_cmd_join(struct wl1271 *wl);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
@@ -428,67 +428,24 @@ struct wl1271_general_parms_cmd {
struct wl1271_cmd_test_header test;
- u8 ref_clk;
- u8 settling_time;
- u8 clk_valid_on_wakeup;
- u8 dc2dcmode;
- u8 single_dual_band;
-
- u8 tx_bip_fem_autodetect;
- u8 tx_bip_fem_manufacturer;
- u8 settings;
+ u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE];
+ s8 reserved[23];
} __attribute__ ((packed));
+#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29
+#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104
+
struct wl1271_radio_parms_cmd {
struct wl1271_cmd_header header;
struct wl1271_cmd_test_header test;
- /* Static radio parameters */
- /* 2.4GHz */
- u8 rx_trace_loss;
- u8 tx_trace_loss;
- s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /* 5GHz */
- u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /* Dynamic radio parameters */
- /* 2.4GHz */
- __le16 tx_ref_pd_voltage;
- s8 tx_ref_power;
- s8 tx_offset_db;
-
- s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
-
- u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
- u8 rx_fem_insertion_loss;
+ u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE];
+ u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE];
- u8 padding2;
-
- /* 5GHz */
- __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
- s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- /* FIXME: this is inconsistent with the types for 2.4GHz */
- s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- u8 padding3[2];
+ u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE];
+ u8 reserved;
+ u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE];
} __attribute__ ((packed));
struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede265..6f9e75cc5640 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
#define CONF_TX_MAX_RATE_CLASSES 8
#define CONF_TX_RATE_MASK_UNSPECIFIED 0
-#define CONF_TX_RATE_MASK_ALL 0x1eff
+#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
u8 psm_entry_retries;
};
-#define CONF_SR_ERR_TBL_MAX_VALUES 14
-
-struct conf_mart_reflex_err_table {
- /*
- * Length of the error table values table.
- *
- * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
- */
- u8 len;
-
- /*
- * Smart Reflex error table upper limit.
- *
- * Range: s8
- */
- s8 upper_limit;
-
- /*
- * Smart Reflex error table values.
- *
- * Range: s8
- */
- s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
-};
-
enum {
CONF_REF_CLK_19_2_E,
CONF_REF_CLK_26_E,
@@ -759,64 +735,6 @@ enum single_dual_band_enum {
CONF_DUAL_BAND
};
-struct conf_general_parms {
- /*
- * RF Reference Clock type / speed
- *
- * Range: CONF_REF_CLK_*
- */
- u8 ref_clk;
-
- /*
- * Settling time of the reference clock after boot.
- *
- * Range: u8
- */
- u8 settling_time;
-
- /*
- * Flag defining whether clock is valid on wakeup.
- *
- * Range: 0 - not valid on wakeup, 1 - valid on wakeup
- */
- u8 clk_valid_on_wakeup;
-
- /*
- * DC-to-DC mode.
- *
- * Range: Unknown
- */
- u8 dc2dcmode;
-
- /*
- * Flag defining whether used as single or dual-band.
- *
- * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
- */
- u8 single_dual_band;
-
- /*
- * TX bip fem autodetect flag.
- *
- * Range: Unknown
- */
- u8 tx_bip_fem_autodetect;
-
- /*
- * TX bip gem manufacturer.
- *
- * Range: Unknown
- */
- u8 tx_bip_fem_manufacturer;
-
- /*
- * Settings flags.
- *
- * Range: Unknown
- */
- u8 settings;
-};
-
#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
#define CONF_NUMBER_OF_SUB_BANDS_5 7
#define CONF_NUMBER_OF_RATE_GROUPS 6
@@ -825,87 +743,43 @@ struct conf_general_parms {
struct conf_radio_parms {
/*
- * Static radio parameters for 2.4GHz
- *
- * Range: unknown
- */
- u8 rx_trace_loss;
- u8 tx_trace_loss;
- s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /*
- * Static radio parameters for 5GHz
- *
- * Range: unknown
- */
- u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /*
- * Dynamic radio parameters for 2.4GHz
+ * FEM parameter set to use
*
- * Range: unknown
+ * Range: 0 or 1
*/
- s16 tx_ref_pd_voltage;
- s8 tx_ref_power;
- s8 tx_offset_db;
-
- s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
-
- u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
- u8 rx_fem_insertion_loss;
+ u8 fem;
+};
+struct conf_init_settings {
/*
- * Dynamic radio parameters for 5GHz
- *
- * Range: unknown
+ * Configure radio parameters.
*/
- s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
- s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
+ struct conf_radio_parms radioparam;
- /* FIXME: this is inconsistent with the types for 2.4GHz */
- s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
};
-#define CONF_SR_ERR_TBL_COUNT 3
+struct conf_itrim_settings {
+ /* enable dco itrim */
+ u8 enable;
-struct conf_init_settings {
- /*
- * Configure Smart Reflex error table values.
- */
- struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+ /* moderation timeout in microsecs from the last TX */
+ u32 timeout;
+};
+struct conf_pm_config_settings {
/*
- * Smart Reflex enable flag.
+ * Host clock settling time
*
- * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
- */
- u8 sr_enable;
-
- /*
- * Configure general parameters.
+ * Range: 0 - 30000 us
*/
- struct conf_general_parms genparam;
+ u32 host_clk_settling_time;
/*
- * Configure radio parameters.
+ * Host fast wakeup support
+ *
+ * Range: true, false
*/
- struct conf_radio_parms radioparam;
-
+ bool host_fast_wakeup_support;
};
struct conf_drv_settings {
@@ -914,6 +788,8 @@ struct conf_drv_settings {
struct conf_tx_settings tx;
struct conf_conn_settings conn;
struct conf_init_settings init;
+ struct conf_itrim_settings itrim;
+ struct conf_pm_config_settings pm_config;
};
#endif
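The two new conf blocks replace the removed Smart Reflex and INI parameter structures; they end up inside conf_drv_settings as added at the bottom of this file. A minimal sketch of how a driver default might fill them; the values are illustrative only, not taken from this patch:

/* Illustrative defaults only. */
static struct conf_drv_settings example_conf = {
	.itrim = {
		.enable  = false,	/* DCO itrim disabled (assumed default) */
		.timeout = 50000,	/* us since last TX (assumed value) */
	},
	.pm_config = {
		.host_clk_settling_time   = 5000,	/* us (assumed value) */
		.host_fast_wakeup_support = false,
	},
};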
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f8964..3f7ff8d0cf5a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -24,6 +24,7 @@
#include "wl1271_debugfs.h"
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include "wl1271.h"
#include "wl1271_acx.h"
@@ -237,6 +238,64 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1271_open_file_generic,
};
+static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "%d\n", state);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t gpio_power_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in gpio_power");
+ goto out;
+ }
+
+ if (value) {
+ wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ } else {
+ wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations gpio_power_ops = {
+ .read = gpio_power_read,
+ .write = gpio_power_write,
+ .open = wl1271_open_file_generic
+};
+
static void wl1271_debugfs_delete_files(struct wl1271 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +392,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
DEBUGFS_DEL(tx_queue_len);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
+
+ DEBUGFS_DEL(gpio_power);
}
static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +495,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
+ DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
+
out:
if (ret < 0)
wl1271_debugfs_delete_files(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85c..7468ef10194b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -24,6 +24,7 @@
#include "wl1271.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_ps.h"
#include "wl12xx_80211.h"
@@ -35,7 +36,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- if (wl->scanning) {
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
NULL, size);
@@ -43,7 +44,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
wl->scan.active,
wl->scan.high_prio,
@@ -62,7 +63,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
}
}
return 0;
@@ -78,25 +79,61 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
switch (mbox->ps_status) {
case EVENT_ENTER_POWER_SAVE_FAIL:
- if (!wl->psm) {
+ wl1271_debug(DEBUG_PSM, "PSM entry failed");
+
+ if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ /* remain in active mode */
wl->psm_entry_retry = 0;
break;
}
if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
+ true);
} else {
wl1271_error("PSM entry failed, giving up.\n");
+ /* FIXME: this may need to be reconsidered. For now it
+ is not possible to indicate to mac80211
+ afterwards that PSM entry failed. To maximize
+ functionality (receiving data and remaining
+ associated), make sure that we are in sync with the
+ AP with regard to PSM mode. */
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ false);
wl->psm_entry_retry = 0;
- *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
wl->psm_entry_retry = 0;
+
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, true);
+ if (ret < 0)
+ break;
+
+ /* enable beacon early termination */
+ ret = wl1271_acx_bet_enable(wl, true);
+ if (ret < 0)
+ break;
+
+ /* go to extremely low power mode */
+ wl1271_ps_elp_sleep(wl);
+ if (ret < 0)
+ break;
break;
case EVENT_EXIT_POWER_SAVE_FAIL:
- wl1271_info("PSM exit failed");
+ wl1271_debug(DEBUG_PSM, "PSM exit failed");
+
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+ /* make sure the firmware goes to active mode - the frame to
+ be sent next will indicate to the AP that we are active. */
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ false);
break;
case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
@@ -136,7 +173,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
*/
- if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
+ if (vector & BSS_LOSE_EVENT_ID &&
+ test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
/* indicate to the stack, that beacons have been lost */
@@ -150,7 +188,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (beacon_loss) {
+ if (wl->vif && beacon_loss) {
/* Obviously, it's dangerous to release the mutex while
we are holding many of the variables in the wl struct.
That's why it's done last in the function, and care must
@@ -177,14 +215,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
void wl1271_event_mbox_config(struct wl1271 *wl)
{
- wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
+ wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
}
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
struct event_mailbox mbox;
int ret;
@@ -195,8 +233,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return -EINVAL;
/* first we read the mbox descriptor */
- wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
- sizeof(struct event_mailbox), false);
+ wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox,
+ sizeof(struct event_mailbox), false);
/* process the descriptor */
ret = wl1271_event_process(wl, &mbox);
@@ -204,9 +242,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return ret;
/* then we let the firmware know it can go on...*/
- if (do_ack)
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
- INTR_TRIG_EVENT_ACK);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a8..278f9206aa56 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf1..d189e8fe05a6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "wl1271_init.h"
#include "wl12xx_80211.h"
@@ -49,7 +50,7 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret;
@@ -113,7 +114,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
return 0;
}
-static int wl1271_init_phy_config(struct wl1271 *wl)
+int wl1271_init_phy_config(struct wl1271 *wl)
{
int ret;
@@ -156,7 +157,7 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_pta(struct wl1271 *wl)
+int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
@@ -171,7 +172,7 @@ static int wl1271_init_pta(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_energy_detection(struct wl1271 *wl)
+int wl1271_init_energy_detection(struct wl1271 *wl)
{
int ret;
@@ -195,7 +196,9 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
int wl1271_hw_init(struct wl1271 *wl)
{
- int ret;
+ struct conf_tx_ac_category *conf_ac;
+ struct conf_tx_tid *conf_tid;
+ int ret, i;
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -229,6 +232,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Initialize connection monitoring thresholds */
ret = wl1271_acx_conn_monit_params(wl);
if (ret < 0)
@@ -270,22 +277,36 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Default TID configuration */
- ret = wl1271_acx_tid_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
/* Default AC configuration */
- ret = wl1271_acx_ac_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
+ conf_ac->cw_max, conf_ac->aifsn,
+ conf_ac->tx_op_limit);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
/* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
+ ret = wl1271_acx_rate_policies(wl);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
@@ -299,8 +320,8 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Configure smart reflex */
- ret = wl1271_acx_smart_reflex(wl);
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index 930677fbe852..bc26f8c53b91 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -27,6 +27,10 @@
#include "wl1271.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_pta(struct wl1271 *wl);
+int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
#endif
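The four init helpers are exported here, presumably so that other start-up paths (for example a PLT bring-up) can reuse them instead of duplicating wl1271_hw_init(). A hedged sketch of such a caller; the function name and the exact sequence are assumptions:

/* Hypothetical minimal bring-up reusing the exported helpers. */
static int example_minimal_init(struct wl1271 *wl)
{
	int ret;

	ret = wl1271_init_templates_config(wl);
	if (ret < 0)
		return ret;

	ret = wl1271_init_phy_config(wl);
	if (ret < 0)
		return ret;

	ret = wl1271_init_pta(wl);
	if (ret < 0)
		return ret;

	return wl1271_init_energy_detection(wl);
}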
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
new file mode 100644
index 000000000000..5cd94d5666c2
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -0,0 +1,213 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crc7.h>
+#include <linux/spi/spi.h>
+
+#include "wl1271.h"
+#include "wl12xx_80211.h"
+#include "wl1271_spi.h"
+#include "wl1271_io.h"
+
+static int wl1271_translate_addr(struct wl1271 *wl, int addr)
+{
+ /*
+ * To translate, first check to which window of addresses the
+ * particular address belongs. Then subtract the starting address
+ * of that window from the address. Then, add offset of the
+ * translated region.
+ *
+ * The translated regions occur next to each other in physical device
+ * memory, so just add the sizes of the preceding address regions to
+ * get the offset to the new region.
+ *
+ * Currently, only the first two regions are addressed, and the
+ * assumption is that all addresses will fall into either of those
+ * two.
+ */
+ if ((addr >= wl->part.reg.start) &&
+ (addr < wl->part.reg.start + wl->part.reg.size))
+ return addr - wl->part.reg.start + wl->part.mem.size;
+ else
+ return addr - wl->part.mem.start;
+}
+
+/* Set the SPI partitions to access the chip addresses
+ *
+ * To simplify driver code, a fixed (virtual) memory map is defined for
+ * register and memory addresses. Because in the chipset, in different stages
+ * of operation, those addresses will move around, an address translation
+ * mechanism is required.
+ *
+ * There are four partitions (three memory and one register partition),
+ * which are mapped to two different areas of the hardware memory.
+ *
+ * Virtual address
+ * space
+ *
+ * | |
+ * ...+----+--> mem.start
+ * Physical address ... | |
+ * space ... | | [PART_0]
+ * ... | |
+ * 00000000 <--+----+... ...+----+--> mem.start + mem.size
+ * | | ... | |
+ * |MEM | ... | |
+ * | | ... | |
+ * mem.size <--+----+... | | (unused area)
+ * | | ... | |
+ * |REG | ... | |
+ * mem.size | | ... | |
+ * + <--+----+... ...+----+--> reg.start
+ * reg.size | | ... | |
+ * |MEM2| ... | | [PART_1]
+ * | | ... | |
+ * ...+----+--> reg.start + reg.size
+ * | |
+ *
+ */
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p)
+{
+ /* copy partition info */
+ memcpy(&wl->part, p, sizeof(*p));
+
+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ p->mem.start, p->mem.size);
+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ p->reg.start, p->reg.size);
+ wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
+ p->mem2.start, p->mem2.size);
+ wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
+ p->mem3.start, p->mem3.size);
+
+ /* write partition info to the chipset */
+ wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+ wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+ wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+ wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+ wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+ wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
+
+ return 0;
+}
+
+void wl1271_io_reset(struct wl1271 *wl)
+{
+ wl1271_spi_reset(wl);
+}
+
+void wl1271_io_init(struct wl1271 *wl)
+{
+ wl1271_spi_init(wl);
+}
+
+void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl1271_spi_raw_write(wl, addr, buf, len, fixed);
+}
+
+void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl1271_spi_raw_read(wl, addr, buf, len, fixed);
+}
+
+void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_spi_raw_read(wl, physical, buf, len, fixed);
+}
+
+void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_spi_raw_write(wl, physical, buf, len, fixed);
+}
+
+u32 wl1271_read32(struct wl1271 *wl, int addr)
+{
+ return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
+
+void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+}
+
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+{
+ /* write address >> 1 + 0x30000 to OCP_POR_CTR */
+ addr = (addr >> 1) + 0x30000;
+ wl1271_write32(wl, OCP_POR_CTR, addr);
+
+ /* write value to OCP_POR_WDATA */
+ wl1271_write32(wl, OCP_DATA_WRITE, val);
+
+ /* write 1 to OCP_CMD */
+ wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
+}
+
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
+{
+ u32 val;
+ int timeout = OCP_CMD_LOOP;
+
+ /* write address >> 1 + 0x30000 to OCP_POR_CTR */
+ addr = (addr >> 1) + 0x30000;
+ wl1271_write32(wl, OCP_POR_CTR, addr);
+
+ /* write 2 to OCP_CMD */
+ wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
+
+ /* poll for data ready */
+ do {
+ val = wl1271_read32(wl, OCP_DATA_READ);
+ } while (!(val & OCP_READY_MASK) && --timeout);
+
+ if (!timeout) {
+ wl1271_warning("Top register access timed out.");
+ return 0xffff;
+ }
+
+ /* check data status and return if OK */
+ if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
+ return val & 0xffff;
+ else {
+ wl1271_warning("Top register access returned error.");
+ return 0xffff;
+ }
+}
+
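The translated accessors in the new wl1271_io.c above (wl1271_read(), wl1271_write(), wl1271_read32(), wl1271_write32()) all route through wl1271_translate_addr(), whose body is not visible in this hunk. Judging from the version removed from wl1271_spi.c further down in this patch, it maps a virtual address onto the two windows of the diagram roughly as in this sketch (illustrative only, not necessarily byte-for-byte what the new file contains):

    static int wl1271_translate_addr(struct wl1271 *wl, int addr)
    {
    	/* Addresses inside the register window sit directly after the
    	 * memory window in the translated space; everything else is
    	 * assumed to fall into the memory window. */
    	if ((addr >= wl->part.reg.start) &&
    	    (addr < wl->part.reg.start + wl->part.reg.size))
    		return addr - wl->part.reg.start + wl->part.mem.size;
    	else
    		return addr - wl->part.mem.start;
    }
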
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
new file mode 100644
index 000000000000..fa9a0b35788f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -0,0 +1,68 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_IO_H__
+#define __WL1271_IO_H__
+
+struct wl1271;
+
+void wl1271_io_reset(struct wl1271 *wl);
+void wl1271_io_init(struct wl1271 *wl);
+
+/* Raw target IO, address is not translated */
+void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed);
+void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed);
+
+/* Translated target IO */
+void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+u32 wl1271_read32(struct wl1271 *wl, int addr);
+void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
+
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p);
+
+static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
+{
+ wl1271_raw_read(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+
+ return wl->buffer_32;
+}
+
+static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl->buffer_32 = val;
+ wl1271_raw_write(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+}
+#endif
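For orientation, the intended calling pattern for this header (seen later in this patch in wl1271_chip_wakeup()) is to program a partition first and only then use the translated accessors. A minimal sketch of that pattern follows; wl1271_example_read_chip_id() is a made-up name for illustration, while wl1271_set_partition(), wl1271_read32() and CHIP_ID_B come from the patch itself:

    #include "wl1271.h"     /* struct wl1271, struct wl1271_partition_set */
    #include "wl1271_io.h"  /* the accessors declared above */
    #include "wl1271_reg.h" /* register addresses such as CHIP_ID_B (assumed location) */

    /* Sketch only: the function name is invented; the call sequence mirrors
     * wl1271_chip_wakeup() later in this patch. */
    static u32 wl1271_example_read_chip_id(struct wl1271 *wl,
    				       struct wl1271_partition_set *part)
    {
    	/* Program the four address windows into the chipset. */
    	wl1271_set_partition(wl, part);

    	/* CHIP_ID_B is a register address; wl1271_read32() translates it
    	 * through the partition map before doing the raw bus access. */
    	return wl1271_read32(wl, CHIP_ID_B);
    }
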
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42fe..65a1aeba2419 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -33,11 +33,13 @@
#include <linux/vmalloc.h>
#include <linux/spi/wl12xx.h>
#include <linux/inetdevice.h>
+#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_tx.h"
#include "wl1271_rx.h"
@@ -46,6 +48,9 @@
#include "wl1271_debugfs.h"
#include "wl1271_cmd.h"
#include "wl1271_boot.h"
+#include "wl1271_testmode.h"
+
+#define WL1271_BOOT_RETRIES 3
static struct conf_drv_settings default_conf = {
.sg = {
@@ -67,16 +72,17 @@ static struct conf_drv_settings default_conf = {
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = 2347,
- .rx_cca_threshold = 0xFFEF,
- .irq_blk_threshold = 0,
- .irq_pkt_threshold = USHORT_MAX,
- .irq_timeout = 5,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
+ CONF_HW_BIT_RATE_2MBPS,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -172,8 +178,8 @@ static struct conf_drv_settings default_conf = {
}
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .tx_compl_timeout = 5,
- .tx_compl_threshold = 5
+ .tx_compl_timeout = 700,
+ .tx_compl_threshold = 4
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +192,12 @@ static struct conf_drv_settings default_conf = {
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
}
},
- .synch_fail_thold = 5,
+ .synch_fail_thold = 10,
.bss_lose_timeout = 100,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
- .ps_poll_threshold = 4,
+ .ps_poll_threshold = 20,
.sig_trigger_count = 2,
.sig_trigger = {
[0] = {
@@ -226,97 +232,17 @@ static struct conf_drv_settings default_conf = {
.psm_entry_retries = 3
},
.init = {
- .sr_err_tbl = {
- [0] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- },
- [1] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
- 0x00 }
- },
- [2] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- }
- },
- .sr_enable = 1,
- .genparam = {
- .ref_clk = CONF_REF_CLK_38_4_E,
- .settling_time = 5,
- .clk_valid_on_wakeup = 0,
- .dc2dcmode = 0,
- .single_dual_band = CONF_SINGLE_BAND,
- .tx_bip_fem_autodetect = 0,
- .tx_bip_fem_manufacturer = 1,
- .settings = 1,
- },
.radioparam = {
- .rx_trace_loss = 10,
- .tx_trace_loss = 10,
- .rx_rssi_and_proc_compens = {
- 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
- 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
- 0x00, 0x0a, 0x14 },
- .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
- .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
- .rx_rssi_and_proc_compens_5 = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00 },
- .tx_ref_pd_voltage = 0x24e,
- .tx_ref_power = 0x78,
- .tx_offset_db = 0x0,
- .tx_rate_limits_normal = {
- 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
- .tx_rate_limits_degraded = {
- 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
- .tx_channel_limits_11b = {
- 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
- 0x22, 0x50 },
- .tx_channel_limits_ofdm = {
- 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
- 0x20, 0x50 },
- .tx_pdv_rate_offsets = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .tx_ibias = {
- 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
- .rx_fem_insertion_loss = 0x14,
- .tx_ref_pd_voltage_5 = {
- 0x0190, 0x01a4, 0x01c3, 0x01d8,
- 0x020a, 0x021c },
- .tx_ref_power_5 = {
- 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
- .tx_offset_db_5 = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .tx_rate_limits_normal_5 = {
- 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
- .tx_rate_limits_degraded_5 = {
- 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
- .tx_channel_limits_ofdm_5 = {
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50 },
- .tx_pdv_rate_offsets_5 = {
- 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
- .tx_ibias_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
- .rx_fem_insertion_loss_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+ .fem = 1,
}
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = false
}
};
@@ -337,15 +263,14 @@ static void wl1271_conf_init(struct wl1271 *wl)
/* apply driver default configuration */
memcpy(&wl->conf, &default_conf, sizeof(default_conf));
-
- if (wl1271_11a_enabled())
- wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
}
static int wl1271_plt_init(struct wl1271 *wl)
{
- int ret;
+ struct conf_tx_ac_category *conf_ac;
+ struct conf_tx_tid *conf_tid;
+ int ret, i;
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -355,15 +280,89 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_init_mem_config(wl);
+ ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
+ /* PHY layer config */
+ ret = wl1271_init_phy_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Energy detection */
+ ret = wl1271_init_energy_detection(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Default fragmentation threshold */
+ ret = wl1271_acx_frag_threshold(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Default TID configuration */
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* Default AC configuration */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
+ conf_ac->cw_max, conf_ac->aifsn,
+ conf_ac->tx_op_limit);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* Enable data path */
+ ret = wl1271_cmd_data_path(wl, 1);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure for CAM power saving (i.e. always active) */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
+
+ out_free_memmap:
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ return ret;
}
static void wl1271_disable_interrupts(struct wl1271 *wl)
@@ -374,11 +373,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
static void wl1271_power_off(struct wl1271 *wl)
{
wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_power_on(struct wl1271 *wl)
{
wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_fw_status(struct wl1271 *wl,
@@ -387,8 +388,7 @@ static void wl1271_fw_status(struct wl1271 *wl,
u32 total = 0;
int i;
- wl1271_spi_read(wl, FW_STATUS_ADDR, status,
- sizeof(*status), false);
+ wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -435,7 +435,7 @@ static void wl1271_irq_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->intr);
@@ -447,14 +447,13 @@ static void wl1271_irq_work(struct work_struct *work)
intr &= WL1271_INTR_MASK;
if (intr & WL1271_ACX_INTR_EVENT_A) {
- bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0, do_ack);
+ wl1271_event_handle(wl, 0);
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1, true);
+ wl1271_event_handle(wl, 1);
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -478,8 +477,8 @@ static void wl1271_irq_work(struct work_struct *work)
}
out_sleep:
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
wl1271_ps_elp_sleep(wl);
out:
@@ -546,6 +545,40 @@ out:
return ret;
}
+static int wl1271_update_mac_addr(struct wl1271 *wl)
+{
+ int ret = 0;
+ u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+ /* get mac address from the NVS */
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+
+ /* FIXME: if it is a zero-address, we should bail out. Now, instead,
+ we randomize an address */
+ if (is_zero_ether_addr(wl->mac_addr)) {
+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+ memcpy(wl->mac_addr, nokia_oui, 3);
+ get_random_bytes(wl->mac_addr + 3, 3);
+
+ /* update this address to the NVS */
+ nvs_ptr[11] = wl->mac_addr[0];
+ nvs_ptr[10] = wl->mac_addr[1];
+ nvs_ptr[6] = wl->mac_addr[2];
+ nvs_ptr[5] = wl->mac_addr[3];
+ nvs_ptr[4] = wl->mac_addr[4];
+ nvs_ptr[3] = wl->mac_addr[5];
+ }
+
+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
+
+ return ret;
+}
+
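As a concrete reading of the byte order above: an NVS whose bytes 11, 10, 6, 5, 4, 3 hold 0x00, 0x1f, 0xdf, 0x12, 0x34, 0x56 (values chosen only for illustration, the first three being the Nokia OUI also used for the random fallback) yields the MAC address 00:1f:df:12:34:56.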
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
@@ -558,15 +591,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
return ret;
}
- if (fw->size % 4) {
- wl1271_error("nvs size is not multiple of 32 bits: %zu",
- fw->size);
+ if (fw->size != sizeof(struct wl1271_nvs_file)) {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ fw->size, sizeof(struct wl1271_nvs_file));
ret = -EILSEQ;
goto out;
}
- wl->nvs_len = fw->size;
- wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
+ wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
@@ -574,9 +606,9 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
goto out;
}
- memcpy(wl->nvs, fw->data, wl->nvs_len);
+ memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
- ret = 0;
+ ret = wl1271_update_mac_addr(wl);
out:
release_firmware(fw);
@@ -614,10 +646,11 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
struct wl1271_partition_set partition;
int ret = 0;
+ msleep(WL1271_PRE_POWER_ON_SLEEP);
wl1271_power_on(wl);
msleep(WL1271_POWER_ON_SLEEP);
- wl1271_spi_reset(wl);
- wl1271_spi_init(wl);
+ wl1271_io_reset(wl);
+ wl1271_io_init(wl);
/* We don't need a real memory partition here, because we only want
* to use the registers at this point. */
@@ -632,7 +665,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* whal_FwCtrl_BootSm() */
/* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
+ wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
/* 1. check if chip id is valid */
@@ -643,7 +676,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +684,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
default:
- wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
- goto out_power_off;
+ goto out;
}
if (wl->fw == NULL) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
/* No NVS from netlink, try to get it from the filesystem */
if (wl->nvs == NULL) {
ret = wl1271_fetch_nvs(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
- goto out;
-
-out_power_off:
- wl1271_power_off(wl);
-
out:
return ret;
}
int wl1271_plt_start(struct wl1271 *wl)
{
+ int retries = WL1271_BOOT_RETRIES;
int ret;
mutex_lock(&wl->mutex);
@@ -696,35 +725,43 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->state = WL1271_STATE_PLT;
-
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
-
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto out_irq_disable;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- /* Make sure power saving is disabled */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_plt_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_PLT;
+ wl1271_notice("firmware booted in PLT mode (%s)",
+ wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot in PLT mode failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -762,7 +799,20 @@ out:
static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txinfo->control.sta;
+ unsigned long flags;
+
+ /* peek into the rates configured in the STA entry */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
+ wl->sta_rate_set = sta->supp_rates[conf->channel->band];
+ set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ /* queue the packet */
skb_queue_tail(&wl->tx_queue, skb);
/*
@@ -784,7 +834,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* protected. Maybe fix this by removing the stupid
* variable altogether and checking the real queue state?
*/
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
return NETDEV_TX_OK;
@@ -880,6 +930,7 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ int retries = WL1271_BOOT_RETRIES;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +944,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
goto out;
}
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- ret = wl1271_hw_init(wl);
- if (ret < 0)
- goto out_irq_disable;
-
- wl->state = WL1271_STATE_ON;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_hw_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_ON;
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -944,11 +1007,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1271_STATE_ON);
- if (wl->scanning) {
+ if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
- wl->scanning = false;
}
wl->state = WL1271_STATE_OFF;
@@ -973,10 +1035,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
- wl->elp = false;
- wl->psm = 0;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
wl->tx_results_count = 0;
@@ -986,7 +1045,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_security_seq_32 = 0;
wl->time_offset = 0;
wl->session_counter = 0;
- wl->joined = false;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1057,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
}
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -1010,9 +1071,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -1032,7 +1093,7 @@ out:
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
@@ -1109,6 +1170,51 @@ out:
}
#endif
+static int wl1271_join_channel(struct wl1271 *wl, int channel)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
+
+ /* the dummy join is not required for ad-hoc */
+ if (wl->bss_type == BSS_TYPE_IBSS)
+ goto out;
+
+ /* disable mac filter, so we hear everything */
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+
+ wl->channel = channel;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+
+out:
+ return ret;
+}
+
+static int wl1271_unjoin_channel(struct wl1271 *wl)
+{
+ int ret;
+
+ /* to stop listening to a channel, we disconnect */
+ ret = wl1271_cmd_disconnect(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_JOINED, &wl->flags);
+ wl->channel = 0;
+ memset(wl->bssid, 0, ETH_ALEN);
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+out:
+ return ret;
+}
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1117,10 +1223,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level);
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
mutex_lock(&wl->mutex);
@@ -1130,35 +1237,55 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
- if (channel != wl->channel) {
- /*
- * We assume that the stack will configure the right channel
- * before associating, so we don't need to send a join
- * command here. We will join the right channel when the
- * BSSID changes
- */
- wl->channel = channel;
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (conf->flags & IEEE80211_CONF_IDLE &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_unjoin_channel(wl);
+ else if (!(conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_join_channel(wl, channel);
+
+ if (conf->flags & IEEE80211_CONF_IDLE) {
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl1271_acx_rate_policies(wl);
+ }
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
- wl1271_info("psm enabled");
+ /* if the channel changes while joined, join again */
+ if (channel != wl->channel &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ wl->channel = channel;
+ /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ wl1271_warning("cmd join to update channel failed %d",
+ ret);
+ } else
+ wl->channel = channel;
- wl->psm_requested = true;
+ if (conf->flags & IEEE80211_CONF_PS &&
+ !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ wl1271_info("psm enabled");
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
+ true);
+ }
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
wl1271_info("psm disabled");
- wl->psm_requested = false;
+ clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
- if (wl->psm)
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags))
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ true);
}
if (conf->power_level != wl->power_level) {
@@ -1350,9 +1477,24 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
wl1271_error("Could not add or replace key");
goto out_sleep;
}
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ goto out_sleep;
+ }
break;
case DISABLE_KEY:
+ /* The wl1271 does not allow removal of unicast keys - they
+ will be cleared automatically on the next CMD_JOIN. Ignore the
+ request silently, as we don't want mac80211 to emit
+ an error message. */
+ if (!is_broadcast_ether_addr(addr))
+ break;
+
ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
@@ -1440,20 +1582,21 @@ out:
return ret;
}
-static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
+static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon)
{
- struct ieee80211_supported_band *band;
- u32 enabled_rates = 0;
- int bit;
-
- band = wl->hw->wiphy->bands[wl->band];
- for (bit = 0; bit < band->n_bitrates; bit++) {
- if (basic_rate_set & 0x1)
- enabled_rates |= band->bitrates[bit].hw_value;
- basic_rate_set >>= 1;
+ u8 *ptr = beacon->data +
+ offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ /* find the location of the ssid in the beacon */
+ while (ptr < beacon->data + beacon->len) {
+ if (ptr[0] == WLAN_EID_SSID) {
+ wl->ssid_len = ptr[1];
+ memcpy(wl->ssid, ptr+2, wl->ssid_len);
+ return;
+ }
+ ptr += ptr[1] + 2;
}
-
- return enabled_rates;
+ wl1271_error("ad-hoc beacon template has no SSID!\n");
}
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -1463,6 +1606,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
{
enum wl1271_cmd_ps_mode mode;
struct wl1271 *wl = hw->priv;
+ bool do_join = false;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1473,9 +1617,67 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ /* FIXME: This implements rudimentary ad-hoc support -
+ proper templates are on the wish list, as is notification
+ of when they change. For now, the templates are updated
+ on every call to this function. */
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ struct ieee80211_hdr *hdr;
+
+ wl1271_ssid_set(wl, beacon);
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
+ beacon->data,
+ beacon->len);
+
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out_sleep;
+ }
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(
+ IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len);
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* Need to update the SSID (for filtering etc) */
+ do_join = true;
+ }
+ }
+
+ /*
+ * Now we know the correct bssid, so we send a new join command
+ * and enable the BSSID filter
+ */
+ if ((changed & BSS_CHANGED_BSSID) &&
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ ret = wl1271_cmd_build_null_data(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd buld null data failed %d",
+ ret);
+ goto out_sleep;
+ }
+
+ /* Need to update the BSSID (for filtering etc) */
+ do_join = true;
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->aid = bss_conf->aid;
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
/*
* with wl1271, we don't need to update the
@@ -1492,15 +1694,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
goto out_sleep;
/* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
+ !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode);
+ ret = wl1271_ps_set_mode(wl, mode, true);
if (ret < 0)
goto out_sleep;
}
} else {
/* use defaults when not associated */
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
}
@@ -1535,15 +1738,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- wl->basic_rate_set = wl1271_enabled_rates_get(
- wl, bss_conf->basic_rates);
-
- ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
+ if (do_join) {
+ ret = wl1271_cmd_join(wl);
if (ret < 0) {
- wl1271_warning("Set rate policies failed %d", ret);
+ wl1271_warning("cmd join failed %d", ret);
goto out_sleep;
}
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
}
out_sleep:
@@ -1553,6 +1754,43 @@ out:
mutex_unlock(&wl->mutex);
}
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ CONF_CHANNEL_TYPE_EDCF,
+ wl1271_tx_get_queue(queue),
+ CONF_PS_SCHEME_LEGACY_PSPOLL,
+ CONF_ACK_POLICY_LEGACY, 0, 0);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
@@ -1599,19 +1837,19 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412},
- { .hw_value = 2, .center_freq = 2417},
- { .hw_value = 3, .center_freq = 2422},
- { .hw_value = 4, .center_freq = 2427},
- { .hw_value = 5, .center_freq = 2432},
- { .hw_value = 6, .center_freq = 2437},
- { .hw_value = 7, .center_freq = 2442},
- { .hw_value = 8, .center_freq = 2447},
- { .hw_value = 9, .center_freq = 2452},
- { .hw_value = 10, .center_freq = 2457},
- { .hw_value = 11, .center_freq = 2462},
- { .hw_value = 12, .center_freq = 2467},
- { .hw_value = 13, .center_freq = 2472},
+ { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
+ { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
+ { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
+ { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
+ { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
+ { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
+ { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
+ { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
/* can't be const, mac80211 writes to this */
@@ -1718,6 +1956,8 @@ static const struct ieee80211_ops wl1271_ops = {
.hw_scan = wl1271_op_hw_scan,
.bss_info_changed = wl1271_op_bss_info_changed,
.set_rts_threshold = wl1271_op_set_rts_threshold,
+ .conf_tx = wl1271_op_conf_tx,
+ CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
static int wl1271_register_hw(struct wl1271 *wl)
@@ -1757,7 +1997,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_PS;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
@@ -1785,24 +2026,17 @@ static struct platform_device wl1271_device = {
};
#define WL1271_DEFAULT_CHANNEL 0
-static int __devinit wl1271_probe(struct spi_device *spi)
+
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
- struct wl12xx_platform_data *pdata;
struct ieee80211_hw *hw;
struct wl1271 *wl;
- int ret, i;
- static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
-
- pdata = spi->dev.platform_data;
- if (!pdata) {
- wl1271_error("no platform data");
- return -ENODEV;
- }
+ int i;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
wl = hw->priv;
@@ -1811,44 +2045,80 @@ static int __devinit wl1271_probe(struct spi_device *spi)
INIT_LIST_HEAD(&wl->list);
wl->hw = hw;
- dev_set_drvdata(&spi->dev, wl);
- wl->spi = spi;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->scanning = false;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl->elp = false;
- wl->psm = 0;
- wl->psm_requested = false;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
- wl->joined = false;
+ wl->flags = 0;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
spin_lock_init(&wl->wl_lock);
- /*
- * In case our MAC address is not correctly set,
- * we use a random but Nokia MAC.
- */
- memcpy(wl->mac_addr, nokia_oui, 3);
- get_random_bytes(wl->mac_addr + 3, 3);
-
wl->state = WL1271_STATE_OFF;
mutex_init(&wl->mutex);
+ /* Apply default driver configuration. */
+ wl1271_conf_init(wl);
+
+ return hw;
+}
+
+int wl1271_free_hw(struct wl1271 *wl)
+{
+ ieee80211_unregister_hw(wl->hw);
+
+ wl1271_debugfs_exit(wl);
+
+ kfree(wl->target_mem_map);
+ vfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
+
+ kfree(wl->fw_status);
+ kfree(wl->tx_res_if);
+
+ ieee80211_free_hw(wl->hw);
+
+ return 0;
+}
+
+static int __devinit wl1271_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl1271_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ dev_set_drvdata(&spi->dev, wl);
+ wl->spi = spi;
+
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
spi->bits_per_word = 32;
@@ -1890,9 +2160,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
}
dev_set_drvdata(&wl1271_device.dev, wl);
- /* Apply default driver configuration. */
- wl1271_conf_init(wl);
-
ret = wl1271_init_ieee80211(wl);
if (ret)
goto out_platform;
@@ -1923,21 +2190,10 @@ static int __devexit wl1271_remove(struct spi_device *spi)
{
struct wl1271 *wl = dev_get_drvdata(&spi->dev);
- ieee80211_unregister_hw(wl->hw);
-
- wl1271_debugfs_exit(wl);
platform_device_unregister(&wl1271_device);
free_irq(wl->irq, wl);
- kfree(wl->target_mem_map);
- vfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->nvs);
- wl->nvs = NULL;
- kfree(wl->fw_status);
- kfree(wl->tx_res_if);
-
- ieee80211_free_hw(wl->hw);
+ wl1271_free_hw(wl);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7eed..e2b1ebf096e8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -24,6 +24,7 @@
#include "wl1271_reg.h"
#include "wl1271_ps.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -39,12 +40,13 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->elp || !wl->psm)
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
+ !test_bit(WL1271_FLAG_PSM, &wl->flags))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
- wl->elp = true;
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
mutex_unlock(&wl->mutex);
@@ -55,7 +57,7 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +72,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
u32 start_time = jiffies;
bool pending = false;
- if (!wl->elp)
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +103,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
}
}
- wl->elp = false;
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
@@ -117,7 +119,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
+ bool send)
{
int ret;
@@ -125,25 +128,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, send);
if (ret < 0)
return ret;
- /* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
- if (ret < 0)
- return ret;
-
- wl1271_ps_elp_sleep(wl);
- if (ret < 0)
- return ret;
-
- wl->psm = 1;
+ set_bit(WL1271_FLAG_PSM, &wl->flags);
break;
case STATION_ACTIVE_MODE:
default:
@@ -162,11 +151,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, send);
if (ret < 0)
return ret;
- wl->psm = 0;
+ clear_bit(WL1271_FLAG_PSM, &wl->flags);
break;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index 779653d0ae85..940276f517a4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -27,7 +27,8 @@
#include "wl1271.h"
#include "wl1271_acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
+ bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
void wl1271_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c7..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
-/*
- * Interrupt registers.
- * 64 bit interrupt sources registers ws ced.
- * sme interupts were removed and new ones were added.
- * Order was changed.
- */
-#define FIQ_MASK (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
-#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
-#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
-#define IRQ_MASK (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
-#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
-#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
-#define ECPU_MASK (REGISTERS_BASE + 0x0448)
-#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
-#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
-#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
-#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
-#define INT_STS_ND (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
-#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
-#define INT_ACK (REGISTERS_BASE + 0x046C)
-#define INT_ACK_L (REGISTERS_BASE + 0x046C)
-#define INT_ACK_H (REGISTERS_BASE + 0x0470)
-#define INT_TRIG (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
-#define HOST_STS_L (REGISTERS_BASE + 0x045C)
-#define HOST_STS_H (REGISTERS_BASE + 0x0460)
-#define HOST_MASK (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
-#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
-#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
-/* Host Interrupts*/
-#define HINT_MASK (REGISTERS_BASE + 0x0494)
-#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
-#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
-#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
-/*1150 spec calls this HINT_STS_RAW*/
-#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
-#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
-#define HINT_ACK (REGISTERS_BASE + 0x04A8)
-#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
-
/*=============================================
Host Interrupt Mask Register - 32bit (RW)
------------------------------------------
@@ -433,16 +370,6 @@
/*===============================================
- Phy regs
- ===============================================*/
-#define ACX_PHY_ADDR_REG SBB_ADDR
-#define ACX_PHY_DATA_REG SBB_DATA
-#define ACX_PHY_CTRL_REG SBB_CTL
-#define ACX_PHY_REG_WR_MASK 0x00000001ul
-#define ACX_PHY_REG_RD_MASK 0x00000002ul
-
-
-/*===============================================
EEPROM Read/Write Request 32bit RW
------------------------------------------
1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
#define ACX_CONT_WIND_MIN_MASK 0x0000007f
#define ACX_CONT_WIND_MAX 0x03ff0000
-/*
- * Indirect slave register/memory registers
- * ----------------------------------------
- */
-#define HW_SLAVE_REG_ADDR_REG 0x00000004
-#define HW_SLAVE_REG_DATA_REG 0x00000008
-#define HW_SLAVE_REG_CTRL_REG 0x0000000c
-
-#define SLAVE_AUTO_INC 0x00010000
-#define SLAVE_NO_AUTO_INC 0x00000000
-#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
-
-#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
-#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
-#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
-#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
-
-#define HW_FUNC_EVENT_INT_EN 0x8000
-#define HW_FUNC_EVENT_MASK_REG 0x00000034
-
-#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
-
/*===============================================
HI_CFG Interface Configuration Register Values
------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
******************************************************************************/
-#define TNETW1251_CHIP_ID_PG1_0 0x07010101
-#define TNETW1251_CHIP_ID_PG1_1 0x07020101
-#define TNETW1251_CHIP_ID_PG1_2 0x07030101
-
/*************************************************************************
Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ca645f38109b..c723d9c7e131 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -21,11 +21,14 @@
*
*/
+#include <linux/gfp.h>
+
#include "wl1271.h"
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_rx.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
u32 drv_rx_counter)
@@ -166,7 +169,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
}
buf = skb_put(skb, length);
- wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
+ wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) buf;
@@ -210,15 +213,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
wl->rx_mem_pool_addr.addr + 4;
/* Choose the block we want to read */
- wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
- &wl->rx_mem_pool_addr,
- sizeof(wl->rx_mem_pool_addr), false);
+ wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
+ sizeof(wl->rx_mem_pool_addr), false);
wl1271_rx_handle_data(wl, buf_size);
wl->rx_counter++;
drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
-
- wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e732..053c84aceb49 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -25,33 +25,12 @@
#include <linux/platform_device.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_spi.h"
-static int wl1271_translate_addr(struct wl1271 *wl, int addr)
-{
- /*
- * To translate, first check to which window of addresses the
- * particular address belongs. Then subtract the starting address
- * of that window from the address. Then, add offset of the
- * translated region.
- *
- * The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
- * get the offset to the new region.
- *
- * Currently, only the two first regions are addressed, and the
- * assumption is that all addresses will fall into either of those
- * two.
- */
- if ((addr >= wl->part.reg.start) &&
- (addr < wl->part.reg.start + wl->part.reg.size))
- return addr - wl->part.reg.start + wl->part.mem.size;
- else
- return addr - wl->part.mem.start;
-}
void wl1271_spi_reset(struct wl1271 *wl)
{
@@ -133,67 +112,6 @@ void wl1271_spi_init(struct wl1271 *wl)
wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
}
-/* Set the SPI partitions to access the chip addresses
- *
- * To simplify driver code, a fixed (virtual) memory map is defined for
- * register and memory addresses. Because in the chipset, in different stages
- * of operation, those addresses will move around, an address translation
- * mechanism is required.
- *
- * There are four partitions (three memory and one register partition),
- * which are mapped to two different areas of the hardware memory.
- *
- * Virtual address
- * space
- *
- * | |
- * ...+----+--> mem.start
- * Physical address ... | |
- * space ... | | [PART_0]
- * ... | |
- * 00000000 <--+----+... ...+----+--> mem.start + mem.size
- * | | ... | |
- * |MEM | ... | |
- * | | ... | |
- * mem.size <--+----+... | | {unused area)
- * | | ... | |
- * |REG | ... | |
- * mem.size | | ... | |
- * + <--+----+... ...+----+--> reg.start
- * reg.size | | ... | |
- * |MEM2| ... | | [PART_1]
- * | | ... | |
- * ...+----+--> reg.start + reg.size
- * | |
- *
- */
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p)
-{
- /* copy partition info */
- memcpy(&wl->part, p, sizeof(*p));
-
- wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
- p->mem.start, p->mem.size);
- wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
- p->reg.start, p->reg.size);
- wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
- p->mem2.start, p->mem2.size);
- wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
- p->mem3.start, p->mem3.size);
-
- /* write partition info to the chipset */
- wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
- wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
- wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
- wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
- wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
- wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
- wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
-
- return 0;
-}
-
#define WL1271_BUSY_WORD_TIMEOUT 1000
/* FIXME: Check busy words, removed due to SPI bug */
@@ -338,78 +256,3 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
-
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_read(wl, physical, buf, len, fixed);
-}
-
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_write(wl, physical, buf, len, fixed);
-}
-
-u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
-{
- return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
-}
-
-void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
-}
-
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
-{
- /* write address >> 1 + 0x30000 to OCP_POR_CTR */
- addr = (addr >> 1) + 0x30000;
- wl1271_spi_write32(wl, OCP_POR_CTR, addr);
-
- /* write value to OCP_POR_WDATA */
- wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
-
- /* write 1 to OCP_CMD */
- wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
-}
-
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
-{
- u32 val;
- int timeout = OCP_CMD_LOOP;
-
- /* write address >> 1 + 0x30000 to OCP_POR_CTR */
- addr = (addr >> 1) + 0x30000;
- wl1271_spi_write32(wl, OCP_POR_CTR, addr);
-
- /* write 2 to OCP_CMD */
- wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
-
- /* poll for data ready */
- do {
- val = wl1271_spi_read32(wl, OCP_DATA_READ);
- timeout--;
- } while (!(val & OCP_READY_MASK) && timeout);
-
- if (!timeout) {
- wl1271_warning("Top register access timed out.");
- return 0xffff;
- }
-
- /* check data status and return if OK */
- if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
- return val & 0xffff;
- else {
- wl1271_warning("Top register access returned error.");
- return 0xffff;
- }
-}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index cb7df1c56314..a803596dad4a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -90,37 +90,7 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed);
-/* Translated target IO */
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
-void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
-
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
-
/* INIT and RESET words */
void wl1271_spi_reset(struct wl1271 *wl);
void wl1271_spi_init(struct wl1271 *wl);
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p);
-
-static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
-{
- wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
-
- return wl->buffer_32;
-}
-
-static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl->buffer_32 = val;
- wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
-}
-
#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
new file mode 100644
index 000000000000..5c1c4f565fd8
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -0,0 +1,284 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include "wl1271_testmode.h"
+
+#include <linux/slab.h>
+#include <net/genetlink.h>
+
+#include "wl1271.h"
+#include "wl1271_spi.h"
+#include "wl1271_acx.h"
+
+#define WL1271_TM_MAX_DATA_LENGTH 1024
+
+enum wl1271_tm_commands {
+ WL1271_TM_CMD_UNSPEC,
+ WL1271_TM_CMD_TEST,
+ WL1271_TM_CMD_INTERROGATE,
+ WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH,
+ WL1271_TM_CMD_SET_PLT_MODE,
+
+ __WL1271_TM_CMD_AFTER_LAST
+};
+#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1)
+
+enum wl1271_tm_attrs {
+ WL1271_TM_ATTR_UNSPEC,
+ WL1271_TM_ATTR_CMD_ID,
+ WL1271_TM_ATTR_ANSWER,
+ WL1271_TM_ATTR_DATA,
+ WL1271_TM_ATTR_IE_ID,
+ WL1271_TM_ATTR_PLT_MODE,
+
+ __WL1271_TM_ATTR_AFTER_LAST
+};
+#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
+
+static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
+ [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
+ [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
+ [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = WL1271_TM_MAX_DATA_LENGTH },
+ [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 },
+ [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 },
+};
+
+
+static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int buf_len, ret, len;
+ struct sk_buff *skb;
+ void *buf;
+ u8 answer = 0;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd test");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (tb[WL1271_TM_ATTR_ANSWER])
+ answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);
+
+ if (buf_len > sizeof(struct wl1271_command))
+ return -EMSGSIZE;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd test failed: %d", ret);
+ return ret;
+ }
+
+ if (answer) {
+ len = nla_total_size(buf_len);
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
+ if (!skb)
+ return -ENOMEM;
+
+ NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int ret;
+ struct wl1271_command *cmd;
+ struct sk_buff *skb;
+ u8 ie_id;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate");
+
+ if (!tb[WL1271_TM_ATTR_IE_ID])
+ return -EINVAL;
+
+ ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd interrogate failed: %d", ret);
+ return ret;
+ }
+
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int buf_len, ret;
+ void *buf;
+ u8 ie_id;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+ if (!tb[WL1271_TM_ATTR_IE_ID])
+ return -EINVAL;
+
+ ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (buf_len > sizeof(struct wl1271_command))
+ return -EMSGSIZE;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len);
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd configure failed: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int ret = 0;
+ size_t len;
+ void *buf;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (len != sizeof(struct wl1271_nvs_file)) {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ len, sizeof(struct wl1271_nvs_file));
+ return -EMSGSIZE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ kfree(wl->nvs);
+
+ wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ if (!wl->nvs) {
+ wl1271_error("could not allocate memory for the nvs file");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(wl->nvs, buf, len);
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
+{
+ u32 val;
+ int ret;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode");
+
+ if (!tb[WL1271_TM_ATTR_PLT_MODE])
+ return -EINVAL;
+
+ val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
+
+ switch (val) {
+ case 0:
+ ret = wl1271_plt_stop(wl);
+ break;
+ case 1:
+ ret = wl1271_plt_start(wl);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+{
+ struct wl1271 *wl = hw->priv;
+ struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ int err;
+
+ err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[WL1271_TM_ATTR_CMD_ID])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ case WL1271_TM_CMD_TEST:
+ return wl1271_tm_cmd_test(wl, tb);
+ case WL1271_TM_CMD_INTERROGATE:
+ return wl1271_tm_cmd_interrogate(wl, tb);
+ case WL1271_TM_CMD_CONFIGURE:
+ return wl1271_tm_cmd_configure(wl, tb);
+ case WL1271_TM_CMD_NVS_PUSH:
+ return wl1271_tm_cmd_nvs_push(wl, tb);
+ case WL1271_TM_CMD_SET_PLT_MODE:
+ return wl1271_tm_cmd_set_plt_mode(wl, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
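
The reply paths above rely on the NLA_PUT() macro, which jumps to the local nla_put_failure label when the attribute does not fit in the reply skb. For illustration only (not part of the patch; the helper name is made up), the same reply path open-coded with nla_put() looks roughly like this:

/* Sketch of an open-coded testmode reply, equivalent to the
 * NLA_PUT()/nla_put_failure pattern used in wl1271_tm_cmd_test().
 */
static int wl1271_tm_reply_data(struct wl1271 *wl, const void *buf, int buf_len)
{
	struct sk_buff *skb;

	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy,
						nla_total_size(buf_len));
	if (!skb)
		return -ENOMEM;

	/* nla_put() returns non-zero when the attribute does not fit */
	if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return cfg80211_testmode_reply(skb);
}
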
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/wl1271_testmode.h
new file mode 100644
index 000000000000..c196d28f9d9d
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.h
@@ -0,0 +1,31 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_TESTMODE_H__
+#define __WL1271_TESTMODE_H__
+
+#include <net/mac80211.h>
+
+int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+
+#endif /* __WL1271_TESTMODE_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c2..811e739d05bf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -26,6 +26,7 @@
#include "wl1271.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
#include "wl1271_tx.h"
@@ -87,7 +88,7 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control)
{
struct wl1271_tx_hw_descr *desc;
- int pad;
+ int pad, ac;
u16 tx_attr;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -107,9 +108,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
/* configure the tx attributes */
tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
- /* FIXME: do we know the packet priority? can we identify mgmt
- packets, and use max prio for them at least? */
- desc->tid = 0;
+
+ /* queue */
+ ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+ desc->tid = wl1271_tx_ac_to_tid(ac);
+
desc->aid = TX_HW_DEFAULT_AID;
desc->reserved = 0;
@@ -121,6 +124,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+ /* if the packets are destined for AP (have a STA entry) send them
+ with AP rate policies, otherwise use default basic rates */
+ if (control->control.sta)
+ tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -158,11 +166,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
len = WL1271_TX_ALIGN(skb->len);
/* perform a fixed address block write with the packet */
- wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
+ wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
/* write packet new counter into the write access register */
wl->tx_packets_count++;
- wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
desc = (struct wl1271_tx_hw_descr *) skb->data;
wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -196,6 +204,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
ret = wl1271_cmd_set_default_wep_key(wl, idx);
if (ret < 0)
return ret;
+ wl->default_key = idx;
}
}
@@ -214,18 +223,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
+static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+{
+ struct ieee80211_supported_band *band;
+ u32 enabled_rates = 0;
+ int bit;
+
+ band = wl->hw->wiphy->bands[wl->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= band->bitrates[bit].hw_value;
+ rate_set >>= 1;
+ }
+
+ return enabled_rates;
+}
+
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
struct sk_buff *skb;
bool woken_up = false;
+ u32 sta_rates = 0;
int ret;
+ /* check if the rates supported by the AP have changed */
+ if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
+ &wl->flags))) {
+ unsigned long flags;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ sta_rates = wl->sta_rate_set;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ /* if rates have changed, re-configure the rate policy */
+ if (unlikely(sta_rates)) {
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
+ wl1271_acx_rate_policies(wl);
+ }
+
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +281,18 @@ void wl1271_tx_work(struct work_struct *work)
wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
"stop queues");
ieee80211_stop_queues(wl->hw);
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
dev_kfree_skb(skb);
goto out;
- } else if (wl->tx_queue_stopped) {
+ } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
+ &wl->flags)) {
/* firmware buffer has space, restart queues */
wl1271_debug(DEBUG_TX,
"complete_packet: waking queues");
ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
}
}
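
The wl1271_tx_enabled_rates_get() helper added in the hunk above walks the mac80211 rate-set bitmap and ORs together the hw_value flag of every enabled bitrate. A stand-alone sketch of the same translation, on a made-up three-rate band (hypothetical hw_value flags, plain userspace C for illustration only):

#include <stdio.h>

struct bitrate { unsigned int hw_value; };

static unsigned int enabled_rates_get(const struct bitrate *rates,
				      int n_bitrates, unsigned int rate_set)
{
	unsigned int enabled = 0;
	int bit;

	for (bit = 0; bit < n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled |= rates[bit].hw_value;
		rate_set >>= 1;
	}
	return enabled;
}

int main(void)
{
	const struct bitrate band[] = { { 0x01 }, { 0x02 }, { 0x04 } };

	/* bits 0 and 2 set -> 0x01 | 0x04 = 0x5 */
	printf("0x%x\n", enabled_rates_get(band, 3, 0x5));
	return 0;
}
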
@@ -335,8 +376,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* read the tx results from the chipset */
- wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
- wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ wl1271_read(wl, le32_to_cpu(memmap->tx_result),
+ wl->tx_res_if, sizeof(*wl->tx_res_if), false);
/* verify that the result buffer is not getting overrun */
if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -357,10 +398,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
}
/* write host counter to chipset (to ack) */
- wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter),
- le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter),
+ le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
}
/* caller must hold wl->mutex */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 416396caf0a0..17e405a09caa 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -123,6 +123,42 @@ struct wl1271_tx_hw_res_if {
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
} __attribute__ ((packed));
+static inline int wl1271_tx_get_queue(int queue)
+{
+ /* FIXME: use best effort until WMM is enabled */
+ return CONF_TX_AC_BE;
+
+ switch (queue) {
+ case 0:
+ return CONF_TX_AC_VO;
+ case 1:
+ return CONF_TX_AC_VI;
+ case 2:
+ return CONF_TX_AC_BE;
+ case 3:
+ return CONF_TX_AC_BK;
+ default:
+ return CONF_TX_AC_BE;
+ }
+}
+
+/* wl1271 tx descriptor needs the tid and we need to convert it from ac */
+static inline int wl1271_tx_ac_to_tid(int ac)
+{
+ switch (ac) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 6;
+ default:
+ return 0;
+ }
+}
+
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_complete(struct wl1271 *wl, u32 count);
void wl1271_tx_flush(struct wl1271 *wl);
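
Assuming the CONF_TX_AC_* constants follow the usual BE = 0, BK = 1, VI = 2, VO = 3 ordering (an assumption; the enum is not part of this diff), wl1271_tx_ac_to_tid() picks one representative user priority per access category (TIDs 0, 2, 4 and 6). A minimal sketch of how the two inline helpers chain together, mirroring the wl1271_tx_fill_hdr() hunk earlier in this patch:

/* Sketch only: derive the descriptor TID from the mac80211 queue mapping.
 * Note that wl1271_tx_get_queue() currently short-circuits to CONF_TX_AC_BE,
 * so every frame ends up with TID 0 until WMM support is enabled.
 */
static void wl1271_tx_fill_tid(struct wl1271_tx_hw_descr *desc,
			       struct sk_buff *skb)
{
	int ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	desc->tid = wl1271_tx_ac_to_tid(ac);
}
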
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 33c8be7ec8e6..9d1277874645 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
@@ -875,20 +876,18 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
static void zd1201_set_multicast(struct net_device *dev)
{
struct zd1201 *zd = netdev_priv(dev);
- struct dev_mc_list *mc = dev->mc_list;
+ struct dev_mc_list *mc;
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
- if (dev->mc_count > ZD1201_MAXMULTI)
+ if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
return;
- for (i=0; i<dev->mc_count; i++) {
- memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
- mc = mc->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc, dev)
+ memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- dev->mc_count*ETH_ALEN, 0);
-
+ netdev_mc_count(dev) * ETH_ALEN, 0);
}
static int zd1201_config_commit(struct net_device *dev,
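
The zd1201 hunk above is one instance of the tree-wide conversion from open-coded dev->mc_list/dev->mc_count walking to the netdev_mc_count()/netdev_for_each_mc_addr() helpers; the yellowfin and znet hunks later in this section follow the same shape. The generic pattern, sketched with a hypothetical fixed-size filter buffer:

/* Sketch of the generic mc-list conversion; filter[] sizing is arbitrary. */
static void example_build_mc_filter(struct net_device *dev,
				    unsigned char filter[][ETH_ALEN],
				    int max_entries)
{
	struct dev_mc_list *mc;
	int i = 0;

	if (netdev_mc_count(dev) > max_entries)
		return;		/* too many entries to filter exactly */

	netdev_for_each_mc_addr(mc, dev)
		memcpy(filter[i++], mc->dmi_addr, ETH_ALEN);
}
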
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index dfa1b9bc22c8..b2af3c549bb3 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include "zd_def.h"
#include "zd_chip.h"
@@ -1325,151 +1326,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
return r;
}
-static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- static const u16 constants[] = {
- 715, 655, 585, 540, 470, 410, 360, 315,
- 270, 235, 205, 175, 150, 125, 105, 85,
- 65, 50, 40, 25, 15
- };
-
- int i;
- u32 x;
-
- /* It seems that their quality parameter is somehow per signal
- * and is now transferred per bit.
- */
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_24M:
- size *= 2;
- break;
- case ZD_OFDM_RATE_9M:
- case ZD_OFDM_RATE_18M:
- case ZD_OFDM_RATE_36M:
- case ZD_OFDM_RATE_54M:
- size *= 4;
- size /= 3;
- break;
- case ZD_OFDM_RATE_48M:
- size *= 3;
- size /= 2;
- break;
- default:
- return -EINVAL;
- }
-
- x = (10000 * status_quality)/size;
- for (i = 0; i < ARRAY_SIZE(constants); i++) {
- if (x > constants[i])
- break;
- }
-
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_9M:
- i += 3;
- break;
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_18M:
- i += 5;
- break;
- case ZD_OFDM_RATE_24M:
- case ZD_OFDM_RATE_36M:
- i += 9;
- break;
- case ZD_OFDM_RATE_48M:
- case ZD_OFDM_RATE_54M:
- i += 15;
- break;
- default:
- return -EINVAL;
- }
-
- return i;
-}
-
-static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- int r;
-
- r = ofdm_qual_db(status_quality, zd_rate, size);
- ZD_ASSERT(r >= 0);
- if (r < 0)
- r = 0;
-
- r = (r * 100)/29;
- return r <= 100 ? r : 100;
-}
-
-static unsigned int log10times100(unsigned int x)
-{
- static const u8 log10[] = {
- 0,
- 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
- 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
- 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
- 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
- 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
- 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
- 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
- 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
- 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
- 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
- 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
- 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
- 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
- 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
- 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
- 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
- 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
- 223, 223, 223, 224, 224, 224, 224,
- };
-
- return x < ARRAY_SIZE(log10) ? log10[x] : 225;
-}
-
-enum {
- MAX_CCK_EVM_DB = 45,
-};
-
-static int cck_evm_db(u8 status_quality)
-{
- return (20 * log10times100(status_quality)) / 100;
-}
-
-static int cck_snr_db(u8 status_quality)
-{
- int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
- ZD_ASSERT(r >= 0);
- return r;
-}
-
-static int cck_qual_percent(u8 status_quality)
-{
- int r;
-
- r = cck_snr_db(status_quality);
- r = (100*r)/17;
- return r <= 100 ? r : 100;
-}
-
static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
{
return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
}
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status)
-{
- return (status->frame_status&ZD_RX_OFDM) ?
- ofdm_qual_percent(status->signal_quality_ofdm,
- zd_rate_from_ofdm_plcp_header(rx_frame),
- size) :
- cck_qual_percent(status->signal_quality_cck);
-}
-
/**
* zd_rx_rate - report zd-rate
* @rx_frame - received frame
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 9fd8f3508d66..f8bbf7d302ae 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
struct rx_status;
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status);
-
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
struct zd_mc_hash {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cf51e8f8174b..16fa289ad77b 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -22,6 +22,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
@@ -350,7 +351,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
first_idx = info->status.rates[0].idx;
ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
retries = &zd_retry_rates[first_idx];
- ZD_ASSERT(0<=retry && retry<=retries->count);
+ ZD_ASSERT(1 <= retry && retry <= retries->count);
info->status.rates[0].idx = retries->rate[0];
info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);
@@ -360,7 +361,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
}
for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
- info->status.rates[i].idx = retries->rate[retry-1];
+ info->status.rates[i].idx = retries->rate[retry - 1];
info->status.rates[i].count = 1; // (success ? 1:2);
}
if (i<IEEE80211_TX_MAX_RATES)
@@ -374,7 +375,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
* zd_mac_tx_failed - callback for failed frames
* @dev: the mac80211 wireless device
*
- * This function is called if a frame couldn't be successfully be
+ * This function is called if a frame couldn't be successfully
* transferred. The first frame from the tx queue, will be selected and
* reported as error to the upper layers.
*/
@@ -424,12 +425,10 @@ void zd_mac_tx_failed(struct urb *urb)
first_idx = info->status.rates[0].idx;
ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
retries = &zd_retry_rates[first_idx];
- if (retry < 0 || retry > retries->count) {
+ if (retry <= 0 || retry > retries->count)
continue;
- }
- ZD_ASSERT(0<=retry && retry<=retries->count);
- final_idx = retries->rate[retry-1];
+ final_idx = retries->rate[retry - 1];
final_rate = zd_rates[final_idx].hw_value;
if (final_rate != tx_status->rate) {
@@ -828,9 +827,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = status->signal_strength;
- stats.qual = zd_rx_qual_percent(buffer,
- length - sizeof(struct rx_status),
- status);
rate = zd_rx_rate(buffer, status);
@@ -872,7 +868,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
static int zd_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
@@ -880,22 +876,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- mac->type = conf->type;
+ mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, conf->mac_addr);
+ return zd_write_mac_addr(&mac->chip, vif->addr);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
@@ -990,12 +986,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- /* changed_flags is always populated but this driver
- * doesn't support all FIF flags so its possible we don't
- * need to do anything */
- if (!changed_flags)
- return;
-
+ /*
+ * If the multicast parameter (as returned by zd_op_prepare_multicast)
+ * has changed, no bit in changed_flags is set. To handle this
+ * situation we do not return when changed_flags is 0; returning
+ * early would break IPv6, which relies on multicast for link-layer
+ * address resolution.
+ */
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index 439799b84876..9e74eb1b67d5 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -19,6 +19,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "zd_rf.h"
#include "zd_usb.h"
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd19cfe..d91ad1a612af 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -62,6 +63,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
@@ -1078,11 +1080,15 @@ static int eject_installer(struct usb_interface *intf)
int r;
/* Find bulk out endpoint */
- endpoint = &iface_desc->endpoint[1].desc;
- if (usb_endpoint_dir_out(endpoint) &&
- usb_endpoint_xfer_bulk(endpoint)) {
- bulk_out_ep = endpoint->bEndpointAddress;
- } else {
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
dev_err(&udev->dev,
"zd1211rw: Could not find bulk out endpoint\n");
return -ENODEV;
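
The eject_installer() fix above replaces a hard-coded endpoint index with a scan over the interface's endpoint descriptors. Written as a stand-alone helper (a sketch only; the function name is made up), the same scan looks like:

/* Sketch: find the first bulk OUT endpoint of an interface altsetting. */
static int find_bulk_out_ep(const struct usb_host_interface *iface_desc,
			    u8 *ep_addr)
{
	int i;

	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		const struct usb_endpoint_descriptor *ep =
			&iface_desc->endpoint[i].desc;

		if (usb_endpoint_dir_out(ep) && usb_endpoint_xfer_bulk(ep)) {
			*ep_addr = ep->bEndpointAddress;
			return 0;
		}
	}
	return -ENODEV;
}
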
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a869b45d3d37..d504e2b60257 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -40,6 +40,7 @@
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <net/ip.h>
#include <xen/xen.h>
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b3..1e783ccc306e 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -19,14 +19,21 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
+#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
+#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
+#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
@@ -37,6 +44,22 @@
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
+/* MDIO Address Register Bit Masks */
+#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
+#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
+#define XEL_MDIOADDR_PHYADR_SHIFT 5
+#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
+
+/* MDIO Write Data Register Bit Masks */
+#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
+
+/* MDIO Read Data Register Bit Masks */
+#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
+
+/* MDIO Control Register Bit Masks */
+#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
+#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
+
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
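
The new MDIO Address register packs the register number into bits 4:0, the PHY address into bits 9:5 and the read/write selector into bit 10. A worked example of the packing used later by xemaclite_mdio_read()/xemaclite_mdio_write() (the helper below is only an illustration, not part of the driver):

/* Example: phy_id = 0x01, reg = 0x01 (MII_BMSR)
 *   read:  0x00000400 | (0x01 << 5) | 0x01 = 0x00000421
 *   write: (0x01 << 5) | 0x01              = 0x00000021
 */
static u32 xel_mdioaddr_pack(int phy_id, int reg, bool is_read)
{
	u32 val = ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) &
		   XEL_MDIOADDR_PHYADR_MASK) |
		  (reg & XEL_MDIOADDR_REGADR_MASK);

	return is_read ? (val | XEL_MDIOADDR_OP_MASK) : val;
}
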
@@ -87,6 +110,12 @@
* @reset_lock: lock used for synchronization
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
+ * @phy_dev: pointer to the PHY device
+ * @phy_node: pointer to the PHY device node
+ * @mii_bus: pointer to the MII bus
+ * @mdio_irqs: IRQs table for MDIO bus
+ * @last_link: last link status
+ * @has_mdio: indicates whether MDIO is included in the HW
*/
struct net_local {
@@ -100,6 +129,15 @@ struct net_local {
spinlock_t reset_lock;
struct sk_buff *deferred_skb;
+
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+
+ struct mii_bus *mii_bus;
+ int mdio_irqs[PHY_MAX_ADDR];
+
+ int last_link;
+ bool has_mdio;
};
@@ -431,7 +469,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/**
- * xemaclite_set_mac_address - Set the MAC address for this device
+ * xemaclite_update_address - Update the MAC address in the device
* @drvdata: Pointer to the Emaclite device private data
* @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
*
@@ -441,8 +479,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
* The MAC address can be programmed using any of the two transmit
* buffers (if configured).
*/
-static void xemaclite_set_mac_address(struct net_local *drvdata,
- u8 *address_ptr)
+static void xemaclite_update_address(struct net_local *drvdata,
+ u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -465,6 +503,30 @@ static void xemaclite_set_mac_address(struct net_local *drvdata,
}
/**
+ * xemaclite_set_mac_address - Set the MAC address for this device
+ * @dev: Pointer to the network device instance
+ * @address: Void pointer to the sockaddr structure
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * Return: -EBUSY if the net device is busy, or 0 if the address is set
+ * successfully
+ */
+static int xemaclite_set_mac_address(struct net_device *dev, void *address)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct sockaddr *addr = address;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ xemaclite_update_address(lp, dev->dev_addr);
+ return 0;
+}
+
+/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
*
@@ -641,12 +703,219 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/**********************/
+/* MDIO Bus functions */
+/**********************/
+
+/**
+ * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emaclite device private data
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success or -ETIMEDOUT on timeout
+ */
+
+static int xemaclite_mdio_wait(struct net_local *lp)
+{
+ long end = jiffies + 2;
+
+ /* wait for the MDIO interface to become available, or time out */
+ while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ XEL_MDIOCTRL_MDIOSTS_MASK) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
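
The two-jiffy busy-wait above compares jiffies by plain subtraction; the more common kernel idiom uses time_after() so the wrap-around handling is explicit. A functionally equivalent sketch (same timeout, same registers, offered only as an alternative form, not as part of the patch):

static int xemaclite_mdio_wait_alt(struct net_local *lp)
{
	unsigned long end = jiffies + 2;

	/* poll the MDIO status bit until it clears or the deadline passes */
	while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
	       XEL_MDIOCTRL_MDIOSTS_MASK) {
		if (time_after(jiffies, end)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
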
+/**
+ * xemaclite_mdio_read - Read from a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to read from
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the phy address to the MDIO Address register
+ * and reads the data from the MDIO Read Data register when it is available.
+ *
+ * Return: Value read from the MII management register
+ */
+static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+ u32 rc;
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and set the OP bit in the
+ * MDIO Address register. Set the Status bit in the MDIO Control
+ * register to start a MDIO read transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * xemaclite_mdio_write - Write to a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to write to
+ * @val: value to write to the register number specified by reg
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the val to the MDIO Write Data register.
+ */
+static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and clear the OP bit in the
+ * MDIO Address register and then write the value into the MDIO Write
+ * Data register. Finally, set the Status bit in the MDIO Control
+ * register to start a MDIO write transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ ~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_reset - Reset the mdio bus.
+ * @bus: Pointer to the MII bus
+ *
+ * This function is required as per Documentation/networking/phy.txt.
+ * There is no reset in this device; this function always returns 0.
+ */
+static int xemaclite_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
+ * @lp: Pointer to the Emaclite device private data
+ * @ofdev: Pointer to OF device structure
+ *
+ * This function enables MDIO bus in the Emaclite device and registers a
+ * mii_bus.
+ *
+ * Return: 0 upon success or a negative error upon failure
+ */
+static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
+{
+ struct mii_bus *bus;
+ int rc;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+
+ /* Don't register the MDIO bus if the phy_node or its parent node
+ * can't be found.
+ */
+ if (!np)
+ return -ENODEV;
+
+ /* Enable the MDIO bus by asserting the enable bit in MDIO Control
+ * register.
+ */
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ XEL_MDIOCTRL_MDIOEN_MASK);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx Emaclite MDIO";
+ bus->read = xemaclite_mdio_read;
+ bus->write = xemaclite_mdio_write;
+ bus->reset = xemaclite_mdio_reset;
+ bus->parent = dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+/**
+ * xemaclite_adjust_link - Link state callback for the Emaclite device
+ * @ndev: pointer to net_device struct
+ *
+ * There's nothing in the Emaclite device to be configured when the link
+ * state changes. We just print the status.
+ */
+void xemaclite_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ if (lp->last_link != link_state) {
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+}
+
/**
* xemaclite_open - Open the network device
* @dev: Pointer to the network device
*
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
+ * It also connects to the phy device, if MDIO is included in Emaclite device.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -656,14 +925,47 @@ static int xemaclite_open(struct net_device *dev)
/* Just to be safe, stop the device first */
xemaclite_disable_interrupts(lp);
+ if (lp->phy_node) {
+ u32 bmcr;
+
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!lp->phy_dev) {
+ dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ /* EmacLite doesn't support giga-bit speeds */
+ lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
+ lp->phy_dev->advertising = lp->phy_dev->supported;
+
+ /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+ phy_write(lp->phy_dev, MII_CTRL1000, 0);
+
+ /* Advertise only 10 and 100mbps full/half duplex speeds */
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+
+ /* Restart auto negotiation */
+ bmcr = phy_read(lp->phy_dev, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(lp->phy_dev, MII_BMCR, bmcr);
+
+ phy_start(lp->phy_dev);
+ }
+
/* Set the MAC address each time opened */
- xemaclite_set_mac_address(lp, dev->dev_addr);
+ xemaclite_update_address(lp, dev->dev_addr);
/* Grab the IRQ */
retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
if (retval) {
dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
dev->irq);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return retval;
}
@@ -682,6 +984,7 @@ static int xemaclite_open(struct net_device *dev)
*
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
+ * It also disconnects the phy device associated with the Emaclite device.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -691,6 +994,10 @@ static int xemaclite_close(struct net_device *dev)
xemaclite_disable_interrupts(lp);
free_irq(dev->irq, dev);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return 0;
}
@@ -754,42 +1061,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
}
/**
- * xemaclite_ioctl - Perform IO Control operations on the network device
- * @dev: Pointer to the network device
- * @rq: Pointer to the interface request structure
- * @cmd: IOCTL command
- *
- * The only IOCTL operation supported by this function is setting the MAC
- * address. An error is reported if any other operations are requested.
- *
- * Return: 0 to indicate success, or a negative error for failure.
- */
-static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
- struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
-
- switch (cmd) {
- case SIOCETHTOOL:
- return -EIO;
-
- case SIOCSIFHWADDR:
- dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
-
- /* Copy MAC address in from user space */
- copy_from_user((void __force *) dev->dev_addr,
- (void __user __force *) hw_addr,
- IFHWADDRLEN);
- xemaclite_set_mac_address(lp, dev->dev_addr);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-/**
* xemaclite_remove_ndev - Free the network device
* @ndev: Pointer to the network device to be freed
*
@@ -840,6 +1111,8 @@ static struct net_device_ops xemaclite_netdev_ops;
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
* address and registers the network device.
+ * It also registers a mii_bus for the Emaclite device, if MDIO is included
+ * in the device.
*
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
@@ -880,6 +1153,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
dev_set_drvdata(dev, ndev);
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
ndev->irq = r_irq.start;
ndev->mem_start = r_mem.start;
@@ -923,13 +1197,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
/* Set the MAC address in the EmacLite device */
- xemaclite_set_mac_address(lp, ndev->dev_addr);
+ xemaclite_update_address(lp, ndev->dev_addr);
- dev_info(dev,
- "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
- ndev->dev_addr[0], ndev->dev_addr[1],
- ndev->dev_addr[2], ndev->dev_addr[3],
- ndev->dev_addr[4], ndev->dev_addr[5]);
+ lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ rc = xemaclite_mdio_setup(lp, &ofdev->dev);
+ if (rc)
+ dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->flags &= ~IFF_MULTICAST;
@@ -972,12 +1247,25 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+
+ /* Un-register the mii_bus, if configured */
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+ }
+
unregister_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
xemaclite_remove_ndev(ndev);
-
dev_set_drvdata(dev, NULL);
return 0;
@@ -987,7 +1275,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
};
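
With .ndo_set_mac_address wired up, the generic SIOCSIFHWADDR handling in the core (dev_set_mac_address()) reaches the driver, which is why the private xemaclite_ioctl() removed earlier in this diff is no longer needed. From userspace the address change is just the standard ioctl; a minimal sketch (interface name and address are made up for the example):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>

static int set_mac(const char *ifname, const unsigned char mac[6])
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);

	if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
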
@@ -999,6 +1287,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index 389ba9df7120..fdba9cb3a599 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -33,6 +33,7 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgtable.h>
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff2..ede5b2436f22 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -23,12 +23,12 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "yellowfin"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sep 11, 2006"
-#define PFX DRV_NAME ": "
-
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -90,7 +90,6 @@ static int gx_fix;
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -237,7 +236,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{ }
};
-static const struct pci_device_id yellowfin_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
@@ -399,7 +398,7 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*np));
if (!dev) {
- printk (KERN_ERR PFX "cannot allocate ethernet device\n");
+ pr_err("cannot allocate ethernet device\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -487,10 +486,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
if (i)
goto err_out_unmap_status;
- printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name,
- ioread32(ioaddr + ChipRev), ioaddr,
- dev->dev_addr, irq);
+ netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name,
+ ioread32(ioaddr + ChipRev), ioaddr,
+ dev->dev_addr, irq);
if (np->drv_flags & HasMII) {
int phy, phy_idx = 0;
@@ -499,9 +498,8 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phys[phy_idx++] = phy;
np->advertising = mdio_read(ioaddr, phy, 4);
- printk(KERN_INFO "%s: MII PHY found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, phy, mii_status, np->advertising);
+ netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
+ phy, mii_status, np->advertising);
}
}
np->mii_cnt = phy_idx;
@@ -584,8 +582,8 @@ static int yellowfin_open(struct net_device *dev)
return ret;
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
- dev->name, dev->irq);
+ netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
+ __func__, dev->irq);
ret = yellowfin_init_ring(dev);
if (ret) {
@@ -642,8 +640,7 @@ static int yellowfin_open(struct net_device *dev)
iowrite32(0x80008000, ioaddr + TxCtrl);
if (yellowfin_debug > 2) {
- printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
}
/* Set the timer to check for link beat. */
@@ -664,8 +661,8 @@ static void yellowfin_timer(unsigned long data)
int next_tick = 60*HZ;
if (yellowfin_debug > 3) {
- printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
+ ioread16(ioaddr + IntrStatus));
}
if (yp->mii_cnt) {
@@ -673,9 +670,8 @@ static void yellowfin_timer(unsigned long data)
int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
int negotiated = lpa & yp->advertising;
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
- "link partner capability %4.4x.\n",
- dev->name, yp->phys[0], bmsr, lpa);
+ netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
+ yp->phys[0], bmsr, lpa);
yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
@@ -696,25 +692,24 @@ static void yellowfin_tx_timeout(struct net_device *dev)
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
- printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
- "status %4.4x, Rx status %4.4x, resetting...\n",
- dev->name, yp->cur_tx, yp->dirty_tx,
- ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
+ yp->cur_tx, yp->dirty_tx,
+ ioread32(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus));
/* Note: these should be KERN_DEBUG. */
if (yellowfin_debug) {
int i;
- printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
+ pr_warning(" Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- yp->rx_ring[i].result_status);
- printk(KERN_CONT "\n");
- printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring);
+ pr_cont(" %08x", yp->rx_ring[i].result_status);
+ pr_cont("\n");
+ pr_warning(" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %4.4x /%8.8x",
+ pr_cont(" %04x /%08x",
yp->tx_status[i].tx_errs,
yp->tx_ring[i].result_status);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
/* If the hardware is found to hang regularly, we will update the code
@@ -891,8 +886,8 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
yp->tx_full = 1;
if (yellowfin_debug > 4) {
- printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
- dev->name, yp->cur_tx, entry);
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
+ yp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -916,8 +911,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
u16 intr_status = ioread16(ioaddr + IntrClear);
if (yellowfin_debug > 4)
- printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
+ intr_status);
if (intr_status == 0)
break;
@@ -963,13 +958,12 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (yellowfin_debug > 5)
- printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
- "%4.4x %4.4x %4.4x %4.4x.\n",
- dev->name, entry,
- yp->tx_status[entry].tx_cnt,
- yp->tx_status[entry].tx_errs,
- yp->tx_status[entry].total_tx_cnt,
- yp->tx_status[entry].paused);
+ netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
+ entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
#endif
if (tx_errs == 0)
break; /* It still hasn't been Txed */
@@ -978,8 +972,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
- dev->name, tx_errs);
+ netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
+ tx_errs);
#endif
dev->stats.tx_errors++;
if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
@@ -989,8 +983,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
} else {
#ifndef final_version
if (yellowfin_debug > 4)
- printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
- dev->name, tx_errs);
+ netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
+ tx_errs);
#endif
dev->stats.tx_bytes += skb->len;
dev->stats.collisions += tx_errs & 15;
@@ -1008,8 +1002,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, yp->cur_tx, yp->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -1031,16 +1025,15 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
yellowfin_error(dev, intr_status);
if (--boguscnt < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n",
- dev->name, intr_status);
+ netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
+ intr_status);
break;
}
} while (1);
if (yellowfin_debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
+ ioread16(ioaddr + IntrStatus));
spin_unlock (&yp->lock);
return IRQ_RETVAL(handled);
@@ -1055,9 +1048,9 @@ static int yellowfin_rx(struct net_device *dev)
int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
if (yellowfin_debug > 4) {
- printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
entry, yp->rx_ring[entry].result_status);
- printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
yp->rx_ring[entry].result_status);
}
@@ -1081,20 +1074,20 @@ static int yellowfin_rx(struct net_device *dev)
le32_to_cpu(desc->result_status)) & 0xffff;
frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
if (yellowfin_debug > 4)
- printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
- frame_status);
+ printk(KERN_DEBUG " %s() status was %04x\n",
+ __func__, frame_status);
if (--boguscnt < 0)
break;
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
- " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
+ desc_status, data_size);
dev->stats.rx_length_errors++;
} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
/* There was a error. */
if (yellowfin_debug > 3)
- printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
- frame_status);
+ printk(KERN_DEBUG " %s() Rx error was %04x\n",
+ __func__, frame_status);
dev->stats.rx_errors++;
if (frame_status & 0x0060) dev->stats.rx_length_errors++;
if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
@@ -1118,8 +1111,8 @@ static int yellowfin_rx(struct net_device *dev)
entry*sizeof(struct yellowfin_desc)),
"\377\377\377\377\377\377", 6) != 0) {
if (bogus_rx++ == 0)
- printk(KERN_WARNING "%s: Bad frame to %pM\n",
- dev->name, buf_addr);
+ netdev_warn(dev, "Bad frame to %pM\n",
+ buf_addr);
#endif
} else {
struct sk_buff *skb;
@@ -1129,9 +1122,8 @@ static int yellowfin_rx(struct net_device *dev)
#ifndef final_version
if (yellowfin_debug > 4)
- printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
- " of %d, bogus_cnt %d.\n",
- pkt_len, data_size, boguscnt);
+ printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
+ __func__, pkt_len, data_size, boguscnt);
#endif
/* Check if the packet is long enough to just pass up the skbuff
without copying to a properly sized skbuff. */
@@ -1191,8 +1183,7 @@ static int yellowfin_rx(struct net_device *dev)
static void yellowfin_error(struct net_device *dev, int intr_status)
{
- printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
- dev->name, intr_status);
+ netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
/* Hmmmmm, it's not clear what to do here. */
if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
dev->stats.tx_errors++;
@@ -1209,13 +1200,13 @@ static int yellowfin_close(struct net_device *dev)
netif_stop_queue (dev);
if (yellowfin_debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
- "Rx %4.4x Int %2.2x.\n",
- dev->name, ioread16(ioaddr + TxStatus),
- ioread16(ioaddr + RxStatus),
- ioread16(ioaddr + IntrStatus));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
+ ioread16(ioaddr + TxStatus),
+ ioread16(ioaddr + RxStatus),
+ ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ yp->cur_tx, yp->dirty_tx,
+ yp->cur_rx, yp->dirty_rx);
}
/* Disable interrupts by clearing the interrupt mask. */
@@ -1229,33 +1220,35 @@ static int yellowfin_close(struct net_device *dev)
#if defined(__i386__)
if (yellowfin_debug > 2) {
- printk(KERN_DEBUG" Tx ring at %8.8llx:\n",
+ printk(KERN_DEBUG " Tx ring at %08llx:\n",
(unsigned long long)yp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE*2; i++)
- printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
- printk(KERN_DEBUG " Rx ring %8.8llx:\n",
+ printk(KERN_DEBUG " Rx ring %08llx:\n",
(unsigned long long)yp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
yp->rx_ring[i].result_status);
if (yellowfin_debug > 6) {
if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
int j;
+
+ printk(KERN_DEBUG);
for (j = 0; j < 0x50; j++)
- printk(" %4.4x",
- get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
- printk("\n");
+ pr_cont(" %04x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ pr_cont("\n");
}
}
}
@@ -1281,8 +1274,8 @@ static int yellowfin_close(struct net_device *dev)
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
if (yellowfin_debug > 0) {
- printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
- dev->name, bogus_rx);
+ netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
+ bogus_rx);
}
#endif
@@ -1301,16 +1294,17 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct dev_mc_list *mclist;
u16 hash_table[4];
int i;
+
memset(hash_table, 0, sizeof(hash_table));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index bc5ae0f6e934..dbfef8d70f2d 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -88,6 +88,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -313,7 +314,8 @@ static void znet_set_multicast_list (struct net_device *dev)
/* Byte D */
cfblk->dummy_1 = 1; /* set to 1 */
cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
- cfblk->mc_all = (dev->mc_list || (dev->flags&IFF_ALLMULTI));/* multicast all mode */
+ cfblk->mc_all = (!netdev_mc_empty(dev) ||
+ (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
cfblk->rcv_mon = 0; /* Monitor mode disabled */
cfblk->frag_acpt = 0; /* Do not accept fragments */
cfblk->tstrttrs = 0; /* No start transmission threshold */